Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 18:43:34 +00:00
Commit: vendor files

vendor/k8s.io/kubernetes/test/e2e/BUILD (generated, vendored, new file, 125 lines)
@@ -0,0 +1,125 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_test(
    name = "go_default_test",
    srcs = ["e2e_test.go"],
    importpath = "k8s.io/kubernetes/test/e2e",
    library = ":go_default_library",
    tags = ["e2e"],
    deps = [
        "//test/e2e/apimachinery:go_default_library",
        "//test/e2e/apps:go_default_library",
        "//test/e2e/auth:go_default_library",
        "//test/e2e/autoscaling:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/instrumentation:go_default_library",
        "//test/e2e/kubectl:go_default_library",
        "//test/e2e/lifecycle:go_default_library",
        "//test/e2e/lifecycle/bootstrap:go_default_library",
        "//test/e2e/multicluster:go_default_library",
        "//test/e2e/network:go_default_library",
        "//test/e2e/node:go_default_library",
        "//test/e2e/scalability:go_default_library",
        "//test/e2e/scheduling:go_default_library",
        "//test/e2e/servicecatalog:go_default_library",
        "//test/e2e/storage:go_default_library",
        "//test/e2e/ui:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "e2e.go",
        "examples.go",
        "gke_local_ssd.go",
        "gke_node_pools.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e",
    deps = [
        "//pkg/api/testapi:go_default_library",
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/kubectl/util/logs:go_default_library",
        "//pkg/version:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/ginkgowrapper:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/e2e/generated:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/ginkgo/config:go_default_library",
        "//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

# This is a handwritten rule. Do not delete, it will not be regenerated by
# update-bazel.sh.
genrule(
    name = "gen_e2e.test",
    testonly = 1,
    srcs = [":go_default_test"],
    outs = ["e2e.test"],
    cmd = "srcs=($(SRCS)); cp $$(dirname $${srcs[0]})/go_default_test $@;",
    output_to_bindir = 1,
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/e2e/apimachinery:all-srcs",
        "//test/e2e/apps:all-srcs",
        "//test/e2e/auth:all-srcs",
        "//test/e2e/autoscaling:all-srcs",
        "//test/e2e/chaosmonkey:all-srcs",
        "//test/e2e/common:all-srcs",
        "//test/e2e/framework:all-srcs",
        "//test/e2e/generated:all-srcs",
        "//test/e2e/instrumentation:all-srcs",
        "//test/e2e/kubectl:all-srcs",
        "//test/e2e/lifecycle:all-srcs",
        "//test/e2e/manifest:all-srcs",
        "//test/e2e/multicluster:all-srcs",
        "//test/e2e/network:all-srcs",
        "//test/e2e/node:all-srcs",
        "//test/e2e/perftype:all-srcs",
        "//test/e2e/scalability:all-srcs",
        "//test/e2e/scheduling:all-srcs",
        "//test/e2e/servicecatalog:all-srcs",
        "//test/e2e/storage:all-srcs",
        "//test/e2e/testing-manifests:all-srcs",
        "//test/e2e/ui:all-srcs",
        "//test/e2e/upgrades:all-srcs",
    ],
    tags = ["automanaged"],
)
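For reference, the handwritten `gen_e2e.test` genrule above simply copies the compiled `go_default_test` binary to `e2e.test`. Assuming a Bazel workspace rooted at the Kubernetes source tree (not this vendored copy), it would typically be invoked as `bazel build //test/e2e:gen_e2e.test`, with the binary surfacing under `bazel-bin/test/e2e/` because `output_to_bindir = 1` is set; the exact invocation and output path depend on the workspace configuration.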
vendor/k8s.io/kubernetes/test/e2e/README.md (generated, vendored, new file, 3 lines)
@@ -0,0 +1,3 @@

See [e2e-tests](https://git.k8s.io/community/contributors/devel/e2e-tests.md)
vendor/k8s.io/kubernetes/test/e2e/apimachinery/BUILD (generated, vendored, new file, 86 lines)
@@ -0,0 +1,86 @@

package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "aggregator.go",
        "certs.go",
        "chunking.go",
        "custom_resource_definition.go",
        "etcd_failure.go",
        "framework.go",
        "garbage_collector.go",
        "generated_clientset.go",
        "initializers.go",
        "namespace.go",
        "table_conversion.go",
        "webhook.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/apimachinery",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/rbac:go_default_library",
        "//pkg/printers:go_default_library",
        "//pkg/util/version:go_default_library",
        "//test/e2e/apps:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
        "//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
        "//vendor/k8s.io/api/batch/v1:go_default_library",
        "//vendor/k8s.io/api/batch/v1beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
        "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/test/e2e/apimachinery/aggregator.go (generated, vendored, new file, 458 lines)
@@ -0,0 +1,458 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/client-go/discovery"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
|
||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.7.0")
|
||||
|
||||
var _ = SIGDescribe("Aggregator", func() {
|
||||
var ns string
|
||||
var c clientset.Interface
|
||||
var aggrclient *aggregatorclient.Clientset
|
||||
f := framework.NewDefaultFramework("aggregator")
|
||||
framework.AddCleanupAction(func() {
|
||||
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
|
||||
if len(ns) > 0 {
|
||||
cleanTest(c, aggrclient, ns)
|
||||
}
|
||||
})
|
||||
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
aggrclient = f.AggregatorClient
|
||||
})
|
||||
|
||||
It("Should be able to support the 1.7 Sample API Server using the current Aggregator", func() {
|
||||
// Make sure the relevant provider supports Aggregator
|
||||
framework.SkipUnlessServerVersionGTE(serverAggregatorVersion, f.ClientSet.Discovery())
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
|
||||
// Testing a 1.7 version of the sample-apiserver
|
||||
TestSampleAPIServer(f, "gcr.io/kubernetes-e2e-test-images/k8s-aggregator-sample-apiserver-amd64:1.7v2")
|
||||
})
|
||||
})
|
||||
|
||||
func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) {
|
||||
// delete the APIService first to avoid causing discovery errors
|
||||
_ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil)
|
||||
|
||||
_ = client.ExtensionsV1beta1().Deployments(namespace).Delete("sample-apiserver", nil)
|
||||
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
|
||||
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
|
||||
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
|
||||
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete("wardler-auth-reader", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoles().Delete("wardler", nil)
|
||||
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":anonymous", nil)
|
||||
}
|
||||
|
||||
// A basic test of whether the sample-apiserver code from 1.7, compiled against 1.7,
|
||||
// will work on the current Aggregator/API-Server.
|
||||
func TestSampleAPIServer(f *framework.Framework, image string) {
|
||||
By("Registering the sample API server.")
|
||||
client := f.ClientSet
|
||||
restClient := client.Discovery().RESTClient()
|
||||
iclient := f.InternalClientset
|
||||
aggrclient := f.AggregatorClient
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
context := setupServerCert(namespace, "sample-api")
|
||||
if framework.ProviderIs("gke") {
|
||||
// kubectl create clusterrolebinding user-cluster-admin-binding --clusterrole=cluster-admin --user=user@domain.com
|
||||
authenticated := rbacv1beta1.Subject{Kind: rbacv1beta1.GroupKind, Name: user.AllAuthenticated}
|
||||
framework.BindClusterRole(client.RbacV1beta1(), "cluster-admin", namespace, authenticated)
|
||||
}
|
||||
|
||||
// kubectl create -f namespace.yaml
|
||||
// NOTE: aggregated apis should generally be set up in their own namespace. As the test framework is setting up a new namespace, we are just using that.
|
||||
|
||||
// kubectl create -f secret.yaml
|
||||
secretName := "sample-apiserver-secret"
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
},
|
||||
Type: v1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": context.cert,
|
||||
"tls.key": context.key,
|
||||
},
|
||||
}
|
||||
_, err := client.CoreV1().Secrets(namespace).Create(secret)
|
||||
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
|
||||
|
||||
// kubectl create -f deploy.yaml
|
||||
deploymentName := "sample-apiserver-deployment"
|
||||
etcdImage := "quay.io/coreos/etcd:v3.1.10"
|
||||
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
mounts := []v1.VolumeMount{
|
||||
{
|
||||
Name: "apiserver-certs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/apiserver.local.config/certificates",
|
||||
},
|
||||
}
|
||||
volumes := []v1.Volume{
|
||||
{
|
||||
Name: "apiserver-certs",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{SecretName: secretName},
|
||||
},
|
||||
},
|
||||
}
|
||||
containers := []v1.Container{
|
||||
{
|
||||
Name: "sample-apiserver",
|
||||
VolumeMounts: mounts,
|
||||
Args: []string{
|
||||
"--etcd-servers=http://localhost:2379",
|
||||
"--tls-cert-file=/apiserver.local.config/certificates/tls.crt",
|
||||
"--tls-private-key-file=/apiserver.local.config/certificates/tls.key",
|
||||
"--audit-log-path=-",
|
||||
"--audit-log-maxage=0",
|
||||
"--audit-log-maxbackup=0",
|
||||
},
|
||||
Image: image,
|
||||
},
|
||||
{
|
||||
Name: "etcd",
|
||||
Image: etcdImage,
|
||||
},
|
||||
}
|
||||
d := &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Type: extensions.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: containers,
|
||||
Volumes: volumes,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
|
||||
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", etcdImage)
|
||||
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s to complete", etcdImage, deploymentName, namespace)
|
||||
|
||||
// kubectl create -f service.yaml
|
||||
serviceLabels := map[string]string{"apiserver": "true"}
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: "sample-api",
|
||||
Labels: map[string]string{"test": "aggregator"},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: serviceLabels,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Protocol: "TCP",
|
||||
Port: 443,
|
||||
TargetPort: intstr.FromInt(443),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().Services(namespace).Create(service)
|
||||
framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-apiserver", namespace)
|
||||
|
||||
// kubectl create -f serviceAccount.yaml
|
||||
sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}}
|
||||
_, err = client.CoreV1().ServiceAccounts(namespace).Create(sa)
|
||||
framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
|
||||
|
||||
// kubectl create -f authDelegator.yaml
|
||||
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler:" + namespace + ":anonymous",
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "wardler",
|
||||
},
|
||||
Subjects: []rbacv1beta1.Subject{
|
||||
{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "User",
|
||||
Name: namespace + ":anonymous",
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":anonymous")
|
||||
|
||||
// kubectl create -f role.yaml
|
||||
resourceRule, err := rbacapi.NewRule("create", "delete", "deletecollection", "get", "list", "patch", "update", "watch").Groups("wardle.k8s.io").Resources("flunders").Rule()
|
||||
framework.ExpectNoError(err, "creating cluster resource rule")
|
||||
urlRule, err := rbacapi.NewRule("get").URLs("*").Rule()
|
||||
framework.ExpectNoError(err, "creating cluster url rule")
|
||||
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
roleLabels := map[string]string{"kubernetes.io/bootstrapping": "wardle-default"}
|
||||
role := rbacapi.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler",
|
||||
Labels: roleLabels,
|
||||
},
|
||||
Rules: []rbacapi.PolicyRule{resourceRule, urlRule},
|
||||
}
|
||||
_, err = iclient.Rbac().ClusterRoles().Create(&role)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "creating cluster role wardler - may not have permissions")
|
||||
|
||||
// kubectl create -f auth-reader.yaml
|
||||
_, err = client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "wardler-auth-reader",
|
||||
Annotations: map[string]string{
|
||||
rbacv1beta1.AutoUpdateAnnotationKey: "true",
|
||||
},
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "",
|
||||
Kind: "Role",
|
||||
Name: "extension-apiserver-authentication-reader",
|
||||
},
|
||||
Subjects: []rbacv1beta1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: "default", // "sample-apiserver",
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating role binding %s:sample-apiserver to access configMap", namespace)
|
||||
|
||||
// kubectl create -f apiservice.yaml
|
||||
_, err = aggrclient.ApiregistrationV1beta1().APIServices().Create(&apiregistrationv1beta1.APIService{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.k8s.io"},
|
||||
Spec: apiregistrationv1beta1.APIServiceSpec{
|
||||
Service: &apiregistrationv1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: "sample-api",
|
||||
},
|
||||
Group: "wardle.k8s.io",
|
||||
Version: "v1alpha1",
|
||||
CABundle: context.signingCert,
|
||||
GroupPriorityMinimum: 2000,
|
||||
VersionPriority: 200,
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "creating apiservice %s with namespace %s", "v1alpha1.wardle.k8s.io", namespace)
|
||||
|
||||
// Wait for the extension apiserver to be up and healthy
|
||||
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
|
||||
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
|
||||
// is setting up a new namespace, we are just using that.
|
||||
err = framework.WaitForDeploymentComplete(client, deployment)
|
||||
|
||||
// We seem to need to do additional waiting until the extension api service is actually up.
|
||||
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
request := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders")
|
||||
request.SetHeader("Accept", "application/json")
|
||||
_, err := request.DoRaw()
|
||||
if err != nil {
|
||||
status, ok := err.(*apierrs.StatusError)
|
||||
if !ok {
|
||||
return false, err
|
||||
}
|
||||
if status.Status().Code == 404 && strings.HasPrefix(err.Error(), "the server could not find the requested resource") {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "gave up waiting for apiservice wardle to come up successfully")
|
||||
|
||||
flunderName := generateFlunderName("rest-flunder")
|
||||
|
||||
// kubectl create -f flunders-1.yaml -v 9
|
||||
// curl -k -v -XPOST https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
|
||||
// Request Body: {"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
|
||||
flunder := `{"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"` + flunderName + `","namespace":"default"}}`
|
||||
result := restClient.Post().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders").Body([]byte(flunder)).Do()
|
||||
framework.ExpectNoError(result.Error(), "creating a new flunders resource")
|
||||
var statusCode int
|
||||
result.StatusCode(&statusCode)
|
||||
if statusCode != 201 {
|
||||
framework.Failf("Flunders client creation response was status %d, not 201", statusCode)
|
||||
}
|
||||
|
||||
pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(result.Error(), "getting pods for flunders service")
|
||||
|
||||
// kubectl get flunders -v 9
|
||||
// curl -k -v -XGET https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
|
||||
contents, err := restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw()
|
||||
framework.ExpectNoError(err, "attempting to get a newly created flunders resource")
|
||||
var flundersList samplev1alpha1.FlunderList
|
||||
err = json.Unmarshal(contents, &flundersList)
|
||||
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
|
||||
if len(flundersList.Items) != 1 {
|
||||
framework.Failf("failed to get back the correct flunders list %v", flundersList)
|
||||
}
|
||||
|
||||
// kubectl delete flunder test-flunder -v 9
|
||||
// curl -k -v -XDELETE https://35.193.112.40/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders/test-flunder
|
||||
_, err = restClient.Delete().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders/" + flunderName).DoRaw()
|
||||
validateErrorWithDebugInfo(f, err, pods, "attempting to delete a newly created flunders(%v) resource", flundersList.Items)
|
||||
|
||||
// kubectl get flunders -v 9
|
||||
// curl -k -v -XGET https://localhost/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders
|
||||
contents, err = restClient.Get().AbsPath("/apis/wardle.k8s.io/v1alpha1/namespaces/default/flunders").SetHeader("Accept", "application/json").DoRaw()
|
||||
framework.ExpectNoError(err, "confirming delete of a newly created flunders resource")
|
||||
err = json.Unmarshal(contents, &flundersList)
|
||||
validateErrorWithDebugInfo(f, err, pods, "Error in unmarshalling %T response from server %s", contents, "/apis/wardle.k8s.io/v1alpha1")
|
||||
if len(flundersList.Items) != 0 {
|
||||
framework.Failf("failed to get back the correct deleted flunders list %v", flundersList)
|
||||
}
|
||||
|
||||
flunderName = generateFlunderName("dynamic-flunder")
|
||||
|
||||
// Rerun the Create/List/Delete tests using the Dynamic client.
|
||||
resources, err := client.Discovery().ServerPreferredNamespacedResources()
|
||||
framework.ExpectNoError(err, "getting server preferred namespaces resources for dynamic client")
|
||||
groupVersionResources, err := discovery.GroupVersionResources(resources)
|
||||
framework.ExpectNoError(err, "getting group version resources for dynamic client")
|
||||
gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
|
||||
_, ok := groupVersionResources[gvr]
|
||||
if !ok {
|
||||
framework.Failf("could not find group version resource for dynamic client and wardle/flunders.")
|
||||
}
|
||||
clientPool := f.ClientPool
|
||||
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
|
||||
framework.ExpectNoError(err, "getting group version resources for dynamic client")
|
||||
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
|
||||
|
||||
// kubectl create -f flunders-1.yaml
|
||||
// Request Body: {"apiVersion":"wardle.k8s.io/v1alpha1","kind":"Flunder","metadata":{"labels":{"sample-label":"true"},"name":"test-flunder","namespace":"default"}}
|
||||
testFlunder := samplev1alpha1.Flunder{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Flunder",
|
||||
APIVersion: "wardle.k8s.io/v1alpha1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: flunderName},
|
||||
Spec: samplev1alpha1.FlunderSpec{},
|
||||
}
|
||||
jsonFlunder, err := json.Marshal(testFlunder)
|
||||
framework.ExpectNoError(err, "marshalling test-flunder for create using dynamic client")
|
||||
unstruct := &unstructuredv1.Unstructured{}
|
||||
err = unstruct.UnmarshalJSON(jsonFlunder)
|
||||
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
|
||||
unstruct, err = dynamicClient.Resource(&apiResource, namespace).Create(unstruct)
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
|
||||
// kubectl get flunders
|
||||
obj, err := dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
unstructuredList, ok := obj.(*unstructuredv1.UnstructuredList)
|
||||
validateErrorWithDebugInfo(f, err, pods, "casting flunders list(%T) as unstructuredList using dynamic client", obj)
|
||||
if len(unstructuredList.Items) != 1 {
|
||||
framework.Failf("failed to get back the correct flunders list %v from the dynamic client", unstructuredList)
|
||||
}
|
||||
|
||||
// kubectl delete flunder test-flunder
|
||||
err = dynamicClient.Resource(&apiResource, namespace).Delete(flunderName, &metav1.DeleteOptions{})
|
||||
validateErrorWithDebugInfo(f, err, pods, "deleting flunders(%v) using dynamic client", unstructuredList.Items)
|
||||
|
||||
// kubectl get flunders
|
||||
obj, err = dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "listing flunders using dynamic client")
|
||||
unstructuredList, ok = obj.(*unstructuredv1.UnstructuredList)
|
||||
validateErrorWithDebugInfo(f, err, pods, "casting flunders list(%T) as unstructuredList using dynamic client", obj)
|
||||
if len(unstructuredList.Items) != 0 {
|
||||
framework.Failf("failed to get back the correct deleted flunders list %v from the dynamic client", unstructuredList)
|
||||
}
|
||||
|
||||
cleanTest(client, aggrclient, namespace)
|
||||
}
|
||||
|
||||
func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodList, msg string, fields ...interface{}) {
|
||||
if err != nil {
|
||||
namespace := f.Namespace.Name
|
||||
msg := fmt.Sprintf(msg, fields...)
|
||||
msg += fmt.Sprintf(" but received unexpected error:\n%v", err)
|
||||
client := f.ClientSet
|
||||
ep, err := client.CoreV1().Endpoints(namespace).Get("sample-api", metav1.GetOptions{})
|
||||
if err == nil {
|
||||
msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep)
|
||||
}
|
||||
pds, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
if err == nil {
|
||||
msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds)
|
||||
msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)
|
||||
}
|
||||
|
||||
framework.Failf(msg)
|
||||
}
|
||||
}
|
||||
|
||||
func generateFlunderName(base string) string {
|
||||
id, err := rand.Int(rand.Reader, big.NewInt(2147483647))
|
||||
if err != nil {
|
||||
return base
|
||||
}
|
||||
return fmt.Sprintf("%s-%d", base, id)
|
||||
}
|
vendor/k8s.io/kubernetes/test/e2e/apimachinery/certs.go (generated, vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"k8s.io/client-go/util/cert"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
type certContext struct {
|
||||
cert []byte
|
||||
key []byte
|
||||
signingCert []byte
|
||||
}
|
||||
|
||||
// Set up the server cert. For example, user apiservers and admission webhooks
// can use the cert to prove their identity to the kube-apiserver.
|
||||
func setupServerCert(namespaceName, serviceName string) *certContext {
|
||||
certDir, err := ioutil.TempDir("", "test-e2e-server-cert")
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create a temp dir for cert generation %v", err)
|
||||
}
|
||||
defer os.RemoveAll(certDir)
|
||||
signingKey, err := cert.NewPrivateKey()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create CA private key %v", err)
|
||||
}
|
||||
signingCert, err := cert.NewSelfSignedCACert(cert.Config{CommonName: "e2e-server-cert-ca"}, signingKey)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create CA cert for apiserver %v", err)
|
||||
}
|
||||
caCertFile, err := ioutil.TempFile(certDir, "ca.crt")
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create a temp file for ca cert generation %v", err)
|
||||
}
|
||||
if err := ioutil.WriteFile(caCertFile.Name(), cert.EncodeCertPEM(signingCert), 0644); err != nil {
|
||||
framework.Failf("Failed to write CA cert %v", err)
|
||||
}
|
||||
key, err := cert.NewPrivateKey()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create private key for %v", err)
|
||||
}
|
||||
signedCert, err := cert.NewSignedCert(
|
||||
cert.Config{
|
||||
CommonName: serviceName + "." + namespaceName + ".svc",
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
},
|
||||
key, signingCert, signingKey,
|
||||
)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create cert%v", err)
|
||||
}
|
||||
certFile, err := ioutil.TempFile(certDir, "server.crt")
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create a temp file for cert generation %v", err)
|
||||
}
|
||||
keyFile, err := ioutil.TempFile(certDir, "server.key")
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create a temp file for key generation %v", err)
|
||||
}
|
||||
if err = ioutil.WriteFile(certFile.Name(), cert.EncodeCertPEM(signedCert), 0600); err != nil {
|
||||
framework.Failf("Failed to write cert file %v", err)
|
||||
}
|
||||
if err = ioutil.WriteFile(keyFile.Name(), cert.EncodePrivateKeyPEM(key), 0644); err != nil {
|
||||
framework.Failf("Failed to write key file %v", err)
|
||||
}
|
||||
return &certContext{
|
||||
cert: cert.EncodeCertPEM(signedCert),
|
||||
key: cert.EncodePrivateKeyPEM(key),
|
||||
signingCert: cert.EncodeCertPEM(signingCert),
|
||||
}
|
||||
}
|
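The certContext returned above supplies both the serving secret (tls.crt/tls.key) and the APIService CABundle used by the aggregator test. As a quick sanity check, the generated PEM blocks can be validated with the standard library. A minimal sketch, assuming it sits next to the vendored code (it reuses the certContext type above); the helper name verifyCertContext is illustrative, not part of the vendored tests.

```go
package apimachinery

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// verifyCertContext (hypothetical helper) checks that the leaf cert and key form
// a usable TLS pair and that the leaf chains back to the signing CA.
func verifyCertContext(ctx *certContext) error {
	// The cert/key pair must parse together, as a TLS server would load them.
	leaf, err := tls.X509KeyPair(ctx.cert, ctx.key)
	if err != nil {
		return fmt.Errorf("invalid serving key pair: %v", err)
	}
	parsed, err := x509.ParseCertificate(leaf.Certificate[0])
	if err != nil {
		return fmt.Errorf("parsing leaf certificate: %v", err)
	}
	// The CA bundle handed to the APIService must verify the serving cert.
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(ctx.signingCert) {
		return fmt.Errorf("signing cert is not valid PEM")
	}
	_, err = parsed.Verify(x509.VerifyOptions{Roots: roots})
	return err
}
```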
vendor/k8s.io/kubernetes/test/e2e/apimachinery/chunking.go (generated, vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const numberOfTotalResources = 400
|
||||
|
||||
var _ = SIGDescribe("Servers with support for API chunking", func() {
|
||||
f := framework.NewDefaultFramework("chunking")
|
||||
|
||||
It("should return chunks of results for list calls", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
client := c.CoreV1().PodTemplates(ns)
|
||||
|
||||
By("creating a large number of resources")
|
||||
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
|
||||
for tries := 3; tries >= 0; tries-- {
|
||||
_, err := client.Create(&v1.PodTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("template-%04d", i),
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "test", Image: "test2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
framework.Logf("Got an error creating template %d: %v", i, err)
|
||||
}
|
||||
Fail("Unable to create template %d, exiting", i)
|
||||
})
|
||||
|
||||
By("retrieving those results in paged fashion several times")
|
||||
for i := 0; i < 3; i++ {
|
||||
opts := metav1.ListOptions{}
|
||||
found := 0
|
||||
var lastRV string
|
||||
for {
|
||||
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
|
||||
list, err := client.List(opts)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
|
||||
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
|
||||
if len(list.Items) > int(opts.Limit) {
|
||||
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
|
||||
}
|
||||
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
|
||||
|
||||
if len(lastRV) == 0 {
|
||||
lastRV = list.ResourceVersion
|
||||
}
|
||||
if lastRV != list.ResourceVersion {
|
||||
Expect(list.ResourceVersion).To(Equal(lastRV))
|
||||
}
|
||||
for _, item := range list.Items {
|
||||
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
|
||||
found++
|
||||
}
|
||||
if len(list.Continue) == 0 {
|
||||
break
|
||||
}
|
||||
opts.Continue = list.Continue
|
||||
}
|
||||
Expect(found).To(BeNumerically("==", numberOfTotalResources))
|
||||
}
|
||||
|
||||
By("retrieving those results all at once")
|
||||
list, err := client.List(metav1.ListOptions{Limit: numberOfTotalResources + 1})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(list.Items).To(HaveLen(numberOfTotalResources))
|
||||
})
|
||||
})
|
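Outside the test, the Limit/Continue protocol exercised above reduces to a small loop: request a page, accumulate items, and resend the returned continue token until it is empty. A minimal sketch, assuming the same client-go vintage as the vendored code (List taking metav1.ListOptions directly, no context); the helper name listAllPodTemplates and the page size are illustrative only.

```go
package apimachinery

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllPodTemplates pages through PodTemplates in a namespace using the
// Limit/Continue chunking protocol exercised by the test above.
func listAllPodTemplates(c kubernetes.Interface, ns string, pageSize int64) ([]v1.PodTemplate, error) {
	var all []v1.PodTemplate
	opts := metav1.ListOptions{Limit: pageSize}
	for {
		list, err := c.CoreV1().PodTemplates(ns).List(opts)
		if err != nil {
			return nil, err
		}
		all = append(all, list.Items...)
		if len(list.Continue) == 0 {
			// No continue token: the final chunk has been received.
			return all, nil
		}
		// Ask for the next chunk where the previous one left off.
		opts.Continue = list.Continue
	}
}
```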
vendor/k8s.io/kubernetes/test/e2e/apimachinery/custom_resource_definition.go (generated, vendored, new file, 71 lines)
@@ -0,0 +1,71 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apiextensions-apiserver/test/integration/testserver"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var crdVersion = utilversion.MustParseSemantic("v1.7.0")
|
||||
|
||||
var _ = SIGDescribe("CustomResourceDefinition resources", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("custom-resource-definition")
|
||||
|
||||
Context("Simple CustomResourceDefinition", func() {
|
||||
/*
|
||||
Testname: crd-creation-test
|
||||
Description: Create a random Custom Resource Definition and make sure
|
||||
the API returns success.
|
||||
*/
|
||||
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
|
||||
|
||||
framework.SkipUnlessServerVersionGTE(crdVersion, f.ClientSet.Discovery())
|
||||
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
apiExtensionClient, err := clientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
}
|
||||
|
||||
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
|
||||
|
||||
// Create the CRD and wait for the resource to be recognized and available.
|
||||
_, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.ClientPool)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
|
||||
if err != nil {
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}()
|
||||
})
|
||||
})
|
||||
})
|
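The testserver helper above generates the CRD used in this test; the object it creates has roughly the following shape. A hedged illustration using the apiextensions v1beta1 types vendored here, with the group, plural, and kind chosen as example values (not taken from the helper), and the function name exampleCRD being hypothetical.

```go
package apimachinery

import (
	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleCRD builds a cluster-scoped CustomResourceDefinition comparable to the
// random one produced by testserver.NewRandomNameCustomResourceDefinition.
func exampleCRD() *v1beta1.CustomResourceDefinition {
	return &v1beta1.CustomResourceDefinition{
		// The object name must be "<plural>.<group>".
		ObjectMeta: metav1.ObjectMeta{Name: "widgets.example.com"},
		Spec: v1beta1.CustomResourceDefinitionSpec{
			Group:   "example.com",
			Version: "v1alpha1",
			Scope:   v1beta1.ClusterScoped,
			Names: v1beta1.CustomResourceDefinitionNames{
				Plural:   "widgets",
				Singular: "widget",
				Kind:     "Widget",
				ListKind: "WidgetList",
			},
		},
	}
}
```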
vendor/k8s.io/kubernetes/test/e2e/apimachinery/etcd_failure.go (generated, vendored, new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/test/e2e/apps"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("etcd-failure")
|
||||
|
||||
BeforeEach(func() {
|
||||
// This test requires:
|
||||
// - SSH
|
||||
// - master access
|
||||
// ... so the provider check should be identical to the intersection of
|
||||
// providers that provide those capabilities.
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
|
||||
Expect(framework.RunRC(testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
Name: "baz",
|
||||
Namespace: f.Namespace.Name,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Replicas: 1,
|
||||
})).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should recover from network partition with master", func() {
|
||||
etcdFailTest(
|
||||
f,
|
||||
"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
|
||||
"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
|
||||
)
|
||||
})
|
||||
|
||||
It("should recover from SIGKILL", func() {
|
||||
etcdFailTest(
|
||||
f,
|
||||
"pgrep etcd | xargs -I {} sudo kill -9 {}",
|
||||
"echo 'do nothing. monit should restart etcd.'",
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
|
||||
doEtcdFailure(failCommand, fixCommand)
|
||||
|
||||
checkExistingRCRecovers(f)
|
||||
|
||||
apps.TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
|
||||
}
|
||||
|
||||
// For etcdFailureDuration, etcd will be failed by executing failCommand on the
// master. After this duration, we execute fixCommand on the master and then
// assert that etcd and the Kubernetes components recover.
const etcdFailureDuration = 20 * time.Second
|
||||
|
||||
func doEtcdFailure(failCommand, fixCommand string) {
|
||||
By("failing etcd")
|
||||
|
||||
masterExec(failCommand)
|
||||
time.Sleep(etcdFailureDuration)
|
||||
masterExec(fixCommand)
|
||||
}
|
||||
|
||||
func masterExec(cmd string) {
|
||||
result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if result.Code != 0 {
|
||||
framework.LogSSHResult(result)
|
||||
framework.Failf("master exec command returned non-zero")
|
||||
}
|
||||
}
|
||||
|
||||
func checkExistingRCRecovers(f *framework.Framework) {
|
||||
By("assert that the pre-existing replication controller recovers")
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
rcSelector := labels.Set{"name": "baz"}.AsSelector()
|
||||
|
||||
By("deleting pods from existing replication controller")
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
|
||||
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
if len(pods.Items) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
for _, pod := range pods.Items {
|
||||
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
framework.Logf("apiserver has recovered")
|
||||
return true, nil
|
||||
}))
|
||||
|
||||
By("waiting for replication controller to recover")
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
|
||||
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, pod := range pods.Items {
|
||||
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}))
|
||||
}
|
vendor/k8s.io/kubernetes/test/e2e/apimachinery/framework.go (generated, vendored, new file, 23 lines)
@@ -0,0 +1,23 @@

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apimachinery

import "github.com/onsi/ginkgo"

// SIGDescribe wraps ginkgo.Describe and prefixes the spec with the SIG label.
func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-api-machinery] "+text, body)
}
vendor/k8s.io/kubernetes/test/e2e/apimachinery/garbage_collector.go (generated, vendored, new file, 1001 lines)
@@ (file diff suppressed because it is too large)
vendor/k8s.io/kubernetes/test/e2e/apimachinery/generated_clientset.go (generated, vendored, new file, 327 lines)
@@ -0,0 +1,327 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
batchv1beta1 "k8s.io/api/batch/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
func stagingClientPod(name, value string) v1.Pod {
|
||||
return v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func testingPod(name, value string) v1.Pod {
|
||||
return v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/index.html",
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 30,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func observeCreation(w watch.Interface) {
|
||||
select {
|
||||
case event, _ := <-w.ResultChan():
|
||||
if event.Type != watch.Added {
|
||||
framework.Failf("Failed to observe the creation: %v", event)
|
||||
}
|
||||
case <-time.After(30 * time.Second):
|
||||
framework.Failf("Timeout while waiting for observing the creation")
|
||||
}
|
||||
}
|
||||
|
||||
func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
|
||||
// output to give us a duration to failure. Maybe we aren't getting the
|
||||
// full timeout for some reason. My guess would be watch failure
|
||||
framework.Logf("Starting to observe pod deletion")
|
||||
deleted := false
|
||||
timeout := false
|
||||
timer := time.After(framework.DefaultPodDeletionTimeout)
|
||||
for !deleted && !timeout {
|
||||
select {
|
||||
case event, normal := <-w.ResultChan():
|
||||
if !normal {
|
||||
framework.Failf("The channel was closed unexpectedly")
|
||||
return
|
||||
}
|
||||
if event.Type == watch.Deleted {
|
||||
obj = event.Object
|
||||
deleted = true
|
||||
}
|
||||
case <-timer:
|
||||
timeout = true
|
||||
}
|
||||
}
|
||||
if !deleted {
|
||||
framework.Failf("Failed to observe pod deletion")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool) {
|
||||
timer := time.After(30 * time.Second)
|
||||
updated := false
|
||||
timeout := false
|
||||
for !updated && !timeout {
|
||||
select {
|
||||
case event, _ := <-w.ResultChan():
|
||||
if event.Type == watch.Modified {
|
||||
if expectedUpdate(event.Object) {
|
||||
updated = true
|
||||
}
|
||||
}
|
||||
case <-timer:
|
||||
timeout = true
|
||||
}
|
||||
}
|
||||
if !updated {
|
||||
framework.Failf("Failed to observe pod update")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("Generated clientset", func() {
|
||||
f := framework.NewDefaultFramework("clientset")
|
||||
It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func() {
|
||||
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
By("constructing the pod")
|
||||
name := "pod" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
podCopy := testingPod(name, value)
|
||||
pod := &podCopy
|
||||
By("setting up watch")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
|
||||
options := metav1.ListOptions{LabelSelector: selector}
|
||||
pods, err := podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for pods: %v", err)
|
||||
}
|
||||
Expect(len(pods.Items)).To(Equal(0))
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
}
|
||||
w, err := podClient.Watch(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up watch: %v", err)
|
||||
}
|
||||
|
||||
By("creating the pod")
|
||||
pod, err = podClient.Create(pod)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
By("verifying the pod is in kubernetes")
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: pod.ResourceVersion,
|
||||
}
|
||||
pods, err = podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for pods: %v", err)
|
||||
}
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
|
||||
By("verifying pod creation was observed")
|
||||
observeCreation(w)
|
||||
|
||||
// We need to wait for the pod to be scheduled, otherwise the deletion
|
||||
// will be carried out immediately rather than gracefully.
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("deleting the pod gracefully")
|
||||
gracePeriod := int64(31)
|
||||
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
|
||||
framework.Failf("Failed to delete pod: %v", err)
|
||||
}
|
||||
|
||||
By("verifying the deletionTimestamp and deletionGracePeriodSeconds of the pod is set")
|
||||
observerUpdate(w, func(obj runtime.Object) bool {
|
||||
pod := obj.(*v1.Pod)
|
||||
return pod.ObjectMeta.DeletionTimestamp != nil && *pod.ObjectMeta.DeletionGracePeriodSeconds == gracePeriod
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
|
||||
parallelism := int32(1)
|
||||
completions := int32(1)
|
||||
return &batchv1beta1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: batchv1beta1.CronJobSpec{
|
||||
Schedule: "*/1 * * * ?",
|
||||
ConcurrencyPolicy: batchv1beta1.AllowConcurrent,
|
||||
JobTemplate: batchv1beta1.JobTemplateSpec{
|
||||
Spec: batchv1.JobSpec{
|
||||
Parallelism: ¶llelism,
|
||||
Completions: &completions,
|
||||
Template: v1.PodTemplateSpec{
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "data",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c",
|
||||
Image: "busybox",
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: "/data",
|
||||
Name: "data",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("Generated clientset", func() {
|
||||
f := framework.NewDefaultFramework("clientset")
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
|
||||
})
|
||||
|
||||
It("should create v1beta1 cronJobs, delete cronJobs, watch cronJobs", func() {
|
||||
cronJobClient := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name)
|
||||
By("constructing the cronJob")
|
||||
name := "cronjob" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
cronJob := newTestingCronJob(name, value)
|
||||
By("setting up watch")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
|
||||
options := metav1.ListOptions{LabelSelector: selector}
|
||||
cronJobs, err := cronJobClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for cronJobs: %v", err)
|
||||
}
|
||||
Expect(len(cronJobs.Items)).To(Equal(0))
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: cronJobs.ListMeta.ResourceVersion,
|
||||
}
|
||||
w, err := cronJobClient.Watch(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up watch: %v", err)
|
||||
}
|
||||
|
||||
By("creating the cronJob")
|
||||
cronJob, err = cronJobClient.Create(cronJob)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create cronJob: %v", err)
|
||||
}
|
||||
|
||||
By("verifying the cronJob is in kubernetes")
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector,
|
||||
ResourceVersion: cronJob.ResourceVersion,
|
||||
}
|
||||
cronJobs, err = cronJobClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to query for cronJobs: %v", err)
|
||||
}
|
||||
Expect(len(cronJobs.Items)).To(Equal(1))
|
||||
|
||||
By("verifying cronJob creation was observed")
|
||||
observeCreation(w)
|
||||
|
||||
By("deleting the cronJob")
|
||||
// Use DeletePropagationBackground so the CronJob is really gone when the call returns.
|
||||
propagationPolicy := metav1.DeletePropagationBackground
|
||||
if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
|
||||
framework.Failf("Failed to delete cronJob: %v", err)
|
||||
}
|
||||
|
||||
options = metav1.ListOptions{LabelSelector: selector}
|
||||
cronJobs, err = cronJobClient.List(options)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
|
||||
}
|
||||
Expect(len(cronJobs.Items)).To(Equal(0))
|
||||
})
|
||||
})
|
417
vendor/k8s.io/kubernetes/test/e2e/apimachinery/initializers.go
generated
vendored
Normal file
@ -0,0 +1,417 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/admissionregistration/v1alpha1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
clientretry "k8s.io/client-go/util/retry"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
|
||||
f := framework.NewDefaultFramework("initializers")
|
||||
|
||||
// TODO: Add failure traps once we have JustAfterEach
|
||||
// See https://github.com/onsi/ginkgo/issues/303
|
||||
|
||||
It("should be invisible to controllers by default", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
podName := "uninitialized-pod"
|
||||
framework.Logf("Creating pod %s", podName)
|
||||
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
// wait to ensure the scheduler does not act on an uninitialized pod
|
||||
err := wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
|
||||
p, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return len(p.Spec.NodeName) > 0, nil
|
||||
})
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout))
|
||||
|
||||
// verify that we can update an initializing pod
|
||||
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pod.Annotations = map[string]string{"update-1": "test"}
|
||||
pod, err = c.CoreV1().Pods(ns).Update(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// verify the list call filters out uninitialized pods
|
||||
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pods.Items).To(HaveLen(1))
|
||||
pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pods.Items).To(HaveLen(0))
|
||||
|
||||
// clear initializers
|
||||
pod.Initializers = nil
|
||||
pod, err = c.CoreV1().Pods(ns).Update(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// pod should now start running
|
||||
err = framework.WaitForPodRunningInNamespace(c, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// ensure create call returns
|
||||
<-ch
|
||||
|
||||
// verify that we cannot start the pod initializing again
|
||||
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pod.Initializers = &metav1.Initializers{
|
||||
Pending: []metav1.Initializer{{Name: "Other"}},
|
||||
}
|
||||
_, err = c.CoreV1().Pods(ns).Update(pod)
|
||||
if !errors.IsInvalid(err) || !strings.Contains(err.Error(), "immutable") {
|
||||
Fail(fmt.Sprintf("expected invalid error: %v", err))
|
||||
}
|
||||
})
|
||||
|
||||
It("should dynamically register and apply initializers to pods [Serial]", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
podName := "uninitialized-pod"
|
||||
framework.Logf("Creating pod %s", podName)
|
||||
|
||||
// create and register an initializer
|
||||
initializerName := "pod.test.e2e.kubernetes.io"
|
||||
initializerConfigName := "e2e-test-initializer"
|
||||
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
|
||||
Initializers: []v1alpha1.Initializer{
|
||||
{
|
||||
Name: initializerName,
|
||||
Rules: []v1alpha1.Rule{
|
||||
{APIGroups: []string{""}, APIVersions: []string{"*"}, Resources: []string{"pods"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if errors.IsNotFound(err) {
|
||||
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
|
||||
defer cleanupInitializer(c, initializerConfigName, initializerName)
|
||||
|
||||
// poller configuration is 1 second, wait at least that long
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// run create that blocks
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
defer close(ch)
|
||||
_, err := c.CoreV1().Pods(ns).Create(newInitPod(podName))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}()
|
||||
|
||||
// wait until the pod shows up uninitialized
|
||||
By("Waiting until the pod is visible to a client")
|
||||
var pod *v1.Pod
|
||||
err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
|
||||
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pod.Initializers).NotTo(BeNil())
|
||||
Expect(pod.Initializers.Pending).To(HaveLen(1))
|
||||
Expect(pod.Initializers.Pending[0].Name).To(Equal(initializerName))
|
||||
|
||||
// pretend we are an initializer
|
||||
By("Completing initialization")
|
||||
pod.Initializers = nil
|
||||
pod, err = c.CoreV1().Pods(ns).Update(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// ensure create call returns
|
||||
<-ch
|
||||
|
||||
// pod should now start running
|
||||
err = framework.WaitForPodRunningInNamespace(c, pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// bypass initialization by explicitly passing an empty pending list
|
||||
By("Setting an empty initializer as an admin to bypass initialization")
|
||||
podName = "preinitialized-pod"
|
||||
pod = newUninitializedPod(podName)
|
||||
pod.Initializers.Pending = nil
|
||||
pod, err = c.CoreV1().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pod.Initializers).To(BeNil())
|
||||
|
||||
// bypass initialization for mirror pods
|
||||
By("Creating a mirror pod that bypasses initialization")
|
||||
podName = "mirror-pod"
|
||||
pod = newInitPod(podName)
|
||||
pod.Annotations = map[string]string{
|
||||
v1.MirrorPodAnnotationKey: "true",
|
||||
}
|
||||
pod.Spec.NodeName = "node-does-not-yet-exist"
|
||||
pod, err = c.CoreV1().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pod.Initializers).To(BeNil())
|
||||
Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true"))
|
||||
})
|
||||
|
||||
It("don't cause replicaset controller creating extra pods if the initializer is not handled [Serial]", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
podName := "uninitialized-pod"
|
||||
framework.Logf("Creating pod %s", podName)
|
||||
|
||||
// create and register an initializer, without setting up a controller to handle it.
|
||||
initializerName := "pod.test.e2e.kubernetes.io"
|
||||
initializerConfigName := "e2e-test-initializer"
|
||||
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
|
||||
Initializers: []v1alpha1.Initializer{
|
||||
{
|
||||
Name: initializerName,
|
||||
Rules: []v1alpha1.Rule{
|
||||
{APIGroups: []string{""}, APIVersions: []string{"*"}, Resources: []string{"pods"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if errors.IsNotFound(err) {
|
||||
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
|
||||
defer cleanupInitializer(c, initializerConfigName, initializerName)
|
||||
|
||||
// poller configuration is 1 second, wait at least that long
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
// create a replicaset
|
||||
persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newReplicaset())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// wait for replicaset controller to confirm that it has handled the creation
|
||||
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// update the replicaset spec to trigger a resync
|
||||
patch := []byte(`{"spec":{"minReadySeconds":5}}`)
|
||||
persistedRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Patch(persistedRS.Name, types.StrategicMergePatchType, patch)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// wait for replicaset controller to confirm that it has handled the spec update
|
||||
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// verify that the replicaset controller doesn't create extra pods
|
||||
selector, err := metav1.LabelSelectorAsSelector(persistedRS.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: selector.String(),
|
||||
IncludeUninitialized: true,
|
||||
}
|
||||
pods, err := c.CoreV1().Pods(ns).List(listOptions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(pods.Items)).Should(Equal(1))
|
||||
})
|
||||
|
||||
It("will be set to nil if a patch removes the last pending initializer", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
podName := "to-be-patch-initialized-pod"
|
||||
framework.Logf("Creating pod %s", podName)
|
||||
|
||||
// TODO: lower the timeout so that the server responds faster.
|
||||
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
|
||||
if err != nil && !errors.IsTimeout(err) {
|
||||
framework.Failf("expect err to be timeout error, got %v", err)
|
||||
}
|
||||
uninitializedPod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(uninitializedPod.Initializers).NotTo(BeNil())
|
||||
Expect(len(uninitializedPod.Initializers.Pending)).Should(Equal(1))
|
||||
|
||||
patch := fmt.Sprintf(`{"metadata":{"initializers":{"pending":[{"$patch":"delete","name":"%s"}]}}}`, uninitializedPod.Initializers.Pending[0].Name)
|
||||
patchedPod, err := c.CoreV1().Pods(ns).Patch(uninitializedPod.Name, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(patchedPod.Initializers).To(BeNil())
|
||||
})
|
||||
})
|
||||
|
||||
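// newUninitializedPod returns a test pod that carries a pending "test.k8s.io"
// initializer, so the API server keeps it in the uninitialized state.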
func newUninitializedPod(podName string) *v1.Pod {
|
||||
pod := newInitPod(podName)
|
||||
pod.Initializers = &metav1.Initializers{
|
||||
Pending: []metav1.Initializer{{Name: "test.k8s.io"}},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
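// newReplicaset returns a single-replica ReplicaSet used to verify that the
// replicaset controller does not create extra pods while an initializer is pending.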
func newReplicaset() *v1beta1.ReplicaSet {
|
||||
name := "initializer-test-replicaset"
|
||||
replicas := int32(1)
|
||||
labels := map[string]string{"initializer-test": "single-replicaset"}
|
||||
return &v1beta1.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1beta1.ReplicaSetSpec{
|
||||
Replicas: &replicas,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name + "-container",
|
||||
Image: "gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
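// newInitPod returns a minimal porter pod, without any initializers, used as
// the base pod for the initializer tests.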
func newInitPod(podName string) *v1.Pod {
|
||||
containerName := fmt.Sprintf("%s-container", podName)
|
||||
port := 8080
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Porter),
|
||||
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
|
||||
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
// removeInitializersFromAllPods walks all pods and ensures they don't have the provided initializer,
|
||||
// to guarantee completing the test doesn't block the entire cluster.
|
||||
func removeInitializersFromAllPods(c clientset.Interface, initializerName string) {
|
||||
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{IncludeUninitialized: true})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, p := range pods.Items {
|
||||
if p.Initializers == nil {
|
||||
continue
|
||||
}
|
||||
err := clientretry.RetryOnConflict(clientretry.DefaultRetry, func() error {
|
||||
pod, err := c.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{IncludeUninitialized: true})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if pod.Initializers == nil {
|
||||
return nil
|
||||
}
|
||||
var updated []metav1.Initializer
|
||||
for _, pending := range pod.Initializers.Pending {
|
||||
if pending.Name != initializerName {
|
||||
updated = append(updated, pending)
|
||||
}
|
||||
}
|
||||
if len(updated) == len(pod.Initializers.Pending) {
|
||||
return nil
|
||||
}
|
||||
pod.Initializers.Pending = updated
|
||||
if len(updated) == 0 {
|
||||
pod.Initializers = nil
|
||||
}
|
||||
framework.Logf("Found initializer on pod %s in ns %s", pod.Name, pod.Namespace)
|
||||
_, err = c.CoreV1().Pods(p.Namespace).Update(pod)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
framework.Logf("Unable to remove initializer from pod %s in ns %s: %v", p.Name, p.Namespace, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupInitializer deletes the initializer configuration and removes the initializer from any pods that still carry it.
|
||||
func cleanupInitializer(c clientset.Interface, initializerConfigName, initializerName string) {
|
||||
if err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Delete(initializerConfigName, nil); err != nil && !errors.IsNotFound(err) {
|
||||
framework.Logf("got error on deleting %s", initializerConfigName)
|
||||
}
|
||||
// poller configuration is 1 second, wait at least that long
|
||||
time.Sleep(3 * time.Second)
|
||||
// clear our initializer from anyone who got it
|
||||
removeInitializersFromAllPods(c, initializerName)
|
||||
}
|
||||
|
||||
// waitForRSObservedGeneration waits until the ReplicaSet's status.observedGeneration has caught up with the given generation.
|
||||
func waitForRSObservedGeneration(c clientset.Interface, ns, name string, generation int64) error {
|
||||
return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rs, err := c.Extensions().ReplicaSets(ns).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if generation > rs.Status.ObservedGeneration {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}
|
277
vendor/k8s.io/kubernetes/test/e2e/apimachinery/namespace.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
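// extinguish creates totalNS test namespaces, deletes them all, and then polls
// for up to maxSeconds until at most maxAllowedAfterDel of them remain.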
func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
|
||||
var err error
|
||||
|
||||
By("Creating testing namespaces")
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(totalNS)
|
||||
for n := 0; n < totalNS; n += 1 {
|
||||
go func(n int) {
|
||||
defer wg.Done()
|
||||
defer GinkgoRecover()
|
||||
_, err = f.CreateNamespace(fmt.Sprintf("nslifetest-%v", n), nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}(n)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
// Wait 10 seconds, then send delete requests for all the namespaces.
|
||||
By("Waiting 10 seconds")
|
||||
time.Sleep(time.Duration(10 * time.Second))
|
||||
deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(deleted)).To(Equal(totalNS))
|
||||
|
||||
By("Waiting for namespaces to vanish")
|
||||
// Now poll until all namespaces have been eradicated.
|
||||
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
|
||||
func() (bool, error) {
|
||||
var cnt = 0
|
||||
nsList, err := f.ClientSet.CoreV1().Namespaces().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, item := range nsList.Items {
|
||||
if strings.Contains(item.Name, "nslifetest") {
|
||||
cnt++
|
||||
}
|
||||
}
|
||||
if cnt > maxAllowedAfterDel {
|
||||
framework.Logf("Remaining namespaces : %v", cnt)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}))
|
||||
}
|
||||
|
||||
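// waitForPodInNamespace polls until the named pod (including uninitialized
// pods) is visible in the namespace and returns it.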
func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
|
||||
var pod *v1.Pod
|
||||
var err error
|
||||
err = wait.PollImmediate(2*time.Second, 15*time.Second, func() (bool, error) {
|
||||
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{IncludeUninitialized: true})
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return pod
|
||||
}
|
||||
|
||||
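// ensurePodsAreRemovedWhenNamespaceIsDeleted verifies that both a running pod
// and an uninitialized pod are gone once their namespace has been deleted.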
func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
|
||||
By("Creating a test namespace")
|
||||
namespace, err := f.CreateNamespace("nsdeletetest", nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for a default service account to be provisioned in namespace")
|
||||
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a pod in the namespace")
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pod",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for the pod to have running status")
|
||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
|
||||
|
||||
By("Creating an uninitialized pod in the namespace")
|
||||
podB := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-pod-uninitialized",
|
||||
Initializers: &metav1.Initializers{Pending: []metav1.Initializer{{Name: "test.initializer.k8s.io"}}},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
go func() {
|
||||
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(podB)
|
||||
// This error is ok, because we will delete the pod before it completes initialization
|
||||
framework.Logf("error from create uninitialized namespace: %v", err)
|
||||
}()
|
||||
podB = waitForPodInNamespace(f.ClientSet, namespace.Name, podB.Name)
|
||||
|
||||
By("Deleting the namespace")
|
||||
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for the namespace to be removed.")
|
||||
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
|
||||
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
|
||||
func() (bool, error) {
|
||||
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
|
||||
if err != nil && errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}))
|
||||
|
||||
By("Recreating the namespace")
|
||||
namespace, err = f.CreateNamespace("nsdeletetest", nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Verifying there are no pods in the namespace")
|
||||
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
|
||||
Expect(err).To(HaveOccurred())
|
||||
}
|
||||
|
||||
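// ensureServicesAreRemovedWhenNamespaceIsDeleted verifies that a service does
// not survive the deletion of its namespace.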
func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
|
||||
var err error
|
||||
|
||||
By("Creating a test namespace")
|
||||
namespace, err := f.CreateNamespace("nsdeletetest", nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for a default service account to be provisioned in namespace")
|
||||
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Creating a service in the namespace")
|
||||
serviceName := "test-service"
|
||||
labels := map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "blah",
|
||||
}
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceName,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: labels,
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: 80,
|
||||
TargetPort: intstr.FromInt(80),
|
||||
}},
|
||||
},
|
||||
}
|
||||
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Deleting the namespace")
|
||||
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Waiting for the namespace to be removed.")
|
||||
maxWaitSeconds := int64(60)
|
||||
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
|
||||
func() (bool, error) {
|
||||
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
|
||||
if err != nil && errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}))
|
||||
|
||||
By("Recreating the namespace")
|
||||
namespace, err = f.CreateNamespace("nsdeletetest", nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Verifying there is no service in the namespace")
|
||||
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
}
|
||||
|
||||
// This test must run [Serial] due to the impact that running other parallel
|
||||
// tests can have on its performance. Each test that follows the common
|
||||
// test framework follows this pattern:
|
||||
// 1. Create a Namespace
|
||||
// 2. Do work that generates content in that namespace
|
||||
// 3. Delete a Namespace
|
||||
// Creation of a Namespace is non-trivial since it requires waiting for a
|
||||
// ServiceAccount to be generated.
|
||||
// Deletion of a Namespace is non-trivial and performance intensive since
|
||||
// it's an orchestrated process. The controller that handles deletion must
|
||||
// query the namespace for all existing content, and then delete each piece
|
||||
// of content in turn. As the API surface grows to add more KIND objects
|
||||
// that could exist in a Namespace, the number of calls that the namespace
|
||||
// controller must orchestrate grows since it must LIST, DELETE (1x1) each
|
||||
// KIND.
|
||||
// There is work underway to improve this, but it's
|
||||
// most likely not going to get significantly better until etcd v3.
|
||||
// Going back to this test, this test generates 100 Namespace objects, and then
|
||||
// rapidly deletes all of them. This causes the NamespaceController to observe
|
||||
// and attempt to process a large number of deletes concurrently. In effect,
|
||||
// it's like running 100 traditional e2e tests in parallel. If the namespace
|
||||
// controller orchestrating deletes is slowed down deleting another test's
|
||||
// content then this test may fail. Since the goal of this test is to soak
|
||||
// Namespace creation, and soak Namespace deletion, it's not appropriate to
|
||||
// further soak the cluster with other parallel Namespace deletion activities
|
||||
// that each have a variable amount of content in the associated Namespace.
|
||||
// When run in [Serial] this test appears to delete Namespace objects at a
|
||||
// rate of approximately 1 per second.
|
||||
var _ = SIGDescribe("Namespaces [Serial]", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("namespaces")
|
||||
|
||||
It("should ensure that all pods are removed when a namespace is deleted.",
|
||||
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })
|
||||
|
||||
It("should ensure that all services are removed when a namespace is deleted.",
|
||||
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })
|
||||
|
||||
It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
|
||||
func() { extinguish(f, 100, 10, 150) })
|
||||
|
||||
// On hold until etcd3; see #7372
|
||||
It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
|
||||
func() { extinguish(f, 100, 0, 150) })
|
||||
|
||||
})
|
180
vendor/k8s.io/kubernetes/test/e2e/apimachinery/table_conversion.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"text/tabwriter"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
f := framework.NewDefaultFramework("tables")
|
||||
|
||||
It("should return pod details", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
podName := "pod-1"
|
||||
framework.Logf("Creating pod %s", podName)
|
||||
|
||||
_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
table := &metav1alpha1.Table{}
|
||||
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Table: %#v", table)
|
||||
|
||||
Expect(len(table.ColumnDefinitions)).To(BeNumerically(">", 2))
|
||||
Expect(len(table.Rows)).To(Equal(1))
|
||||
Expect(len(table.Rows[0].Cells)).To(Equal(len(table.ColumnDefinitions)))
|
||||
Expect(table.ColumnDefinitions[0].Name).To(Equal("Name"))
|
||||
Expect(table.Rows[0].Cells[0]).To(Equal(podName))
|
||||
|
||||
out := printTable(table)
|
||||
Expect(out).To(MatchRegexp("^NAME\\s"))
|
||||
Expect(out).To(MatchRegexp("\npod-1\\s"))
|
||||
framework.Logf("Table:\n%s", out)
|
||||
})
|
||||
|
||||
It("should return chunks of table results for list calls", func() {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
client := c.CoreV1().PodTemplates(ns)
|
||||
|
||||
By("creating a large number of resources")
|
||||
workqueue.Parallelize(5, 20, func(i int) {
|
||||
for tries := 3; tries >= 0; tries-- {
|
||||
_, err := client.Create(&v1.PodTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("template-%04d", i),
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Name: "test", Image: "test2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
framework.Logf("Got an error creating template %d: %v", i, err)
|
||||
}
|
||||
Fail("Unable to create template %d, exiting", i)
|
||||
})
|
||||
|
||||
pagedTable := &metav1alpha1.Table{}
|
||||
err := c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
|
||||
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
|
||||
SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").
|
||||
Do().Into(pagedTable)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
|
||||
if len(pagedTable.Rows) > 2 {
|
||||
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
|
||||
}
|
||||
Expect(len(pagedTable.Rows)).To(Equal(2))
|
||||
Expect(pagedTable.ResourceVersion).ToNot(Equal(""))
|
||||
Expect(pagedTable.SelfLink).ToNot(Equal(""))
|
||||
Expect(pagedTable.Continue).ToNot(Equal(""))
|
||||
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0000"))
|
||||
Expect(pagedTable.Rows[1].Cells[0]).To(Equal("template-0001"))
|
||||
|
||||
err = c.CoreV1().RESTClient().Get().Namespace(ns).Resource("podtemplates").
|
||||
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
|
||||
SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").
|
||||
Do().Into(pagedTable)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
|
||||
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0002"))
|
||||
})
|
||||
|
||||
It("should return generic metadata details across all namespaces for nodes", func() {
|
||||
c := f.ClientSet
|
||||
|
||||
table := &metav1alpha1.Table{}
|
||||
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Table: %#v", table)
|
||||
|
||||
Expect(len(table.ColumnDefinitions)).To(BeNumerically(">=", 2))
|
||||
Expect(len(table.Rows)).To(BeNumerically(">=", 1))
|
||||
Expect(len(table.Rows[0].Cells)).To(Equal(len(table.ColumnDefinitions)))
|
||||
Expect(table.ColumnDefinitions[0].Name).To(Equal("Name"))
|
||||
Expect(table.ResourceVersion).ToNot(Equal(""))
|
||||
Expect(table.SelfLink).ToNot(Equal(""))
|
||||
|
||||
out := printTable(table)
|
||||
Expect(out).To(MatchRegexp("^NAME\\s"))
|
||||
framework.Logf("Table:\n%s", out)
|
||||
})
|
||||
|
||||
It("should return a 406 for a backend which does not implement metadata", func() {
|
||||
c := f.ClientSet
|
||||
|
||||
table := &metav1alpha1.Table{}
|
||||
err := c.CoreV1().RESTClient().Get().Resource("services").SetHeader("Accept", "application/json;as=Table;v=v1alpha1;g=meta.k8s.io").Do().Into(table)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
|
||||
})
|
||||
})
|
||||
|
||||
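// printTable renders a Table response through the tabwriter-backed printer and
// returns the formatted output.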
func printTable(table *metav1alpha1.Table) string {
|
||||
buf := &bytes.Buffer{}
|
||||
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
|
||||
err := printers.PrintTable(table, tw, printers.PrintOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
tw.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
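// newTablePod returns a minimal porter pod used to exercise the Table
// transformation endpoints.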
func newTablePod(podName string) *v1.Pod {
|
||||
containerName := fmt.Sprintf("%s-container", podName)
|
||||
port := 8080
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Porter),
|
||||
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
|
||||
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
882
vendor/k8s.io/kubernetes/test/e2e/apimachinery/webhook.go
generated
vendored
Normal file
@ -0,0 +1,882 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apimachinery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/admissionregistration/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apiextensions-apiserver/test/integration/testserver"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
_ "github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
secretName = "sample-webhook-secret"
|
||||
deploymentName = "sample-webhook-deployment"
|
||||
serviceName = "e2e-test-webhook"
|
||||
roleBindingName = "webhook-auth-reader"
|
||||
webhookConfigName = "e2e-test-webhook-config"
|
||||
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
|
||||
skipNamespaceLabelKey = "skip-webhook-admission"
|
||||
skipNamespaceLabelValue = "yes"
|
||||
skippedNamespaceName = "exempted-namesapce"
|
||||
disallowedPodName = "disallowed-pod"
|
||||
disallowedConfigMapName = "disallowed-configmap"
|
||||
allowedConfigMapName = "allowed-configmap"
|
||||
crdName = "e2e-test-webhook-crd"
|
||||
crdKind = "E2e-test-webhook-crd"
|
||||
crdWebhookConfigName = "e2e-test-webhook-config-crd"
|
||||
crdMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-crd"
|
||||
crdAPIGroup = "webhook-crd-test.k8s.io"
|
||||
crdAPIVersion = "v1"
|
||||
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
|
||||
failNamespaceLabelKey = "fail-closed-webhook"
|
||||
failNamespaceLabelValue = "yes"
|
||||
failNamespaceName = "fail-closed-namesapce"
|
||||
)
|
||||
|
||||
var serverWebhookVersion = utilversion.MustParseSemantic("v1.8.0")
|
||||
|
||||
var _ = SIGDescribe("AdmissionWebhook", func() {
|
||||
var context *certContext
|
||||
f := framework.NewDefaultFramework("webhook")
|
||||
|
||||
var client clientset.Interface
|
||||
var namespaceName string
|
||||
|
||||
BeforeEach(func() {
|
||||
client = f.ClientSet
|
||||
namespaceName = f.Namespace.Name
|
||||
|
||||
// Make sure the relevant provider supports admission webhook
|
||||
framework.SkipUnlessServerVersionGTE(serverWebhookVersion, f.ClientSet.Discovery())
|
||||
framework.SkipUnlessProviderIs("gce", "gke", "local")
|
||||
|
||||
_, err := f.ClientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(metav1.ListOptions{})
|
||||
if errors.IsNotFound(err) {
|
||||
framework.Skipf("dynamic configuration of webhooks requires the admissionregistration.k8s.io group to be enabled")
|
||||
}
|
||||
|
||||
By("Setting up server cert")
|
||||
context = setupServerCert(namespaceName, serviceName)
|
||||
createAuthReaderRoleBinding(f, namespaceName)
|
||||
|
||||
// Note that in 1.9 we will have a backwards-incompatible change to
|
||||
// admission webhooks, so the image will be updated to 1.9 sometime in
|
||||
// the development 1.9 cycle.
|
||||
deployWebhookAndService(f, "gcr.io/kubernetes-e2e-test-images/k8s-sample-admission-webhook-amd64:1.8v6", context)
|
||||
})
|
||||
AfterEach(func() {
|
||||
cleanWebhookTest(client, namespaceName)
|
||||
})
|
||||
|
||||
It("Should be able to deny pod and configmap creation", func() {
|
||||
registerWebhook(f, context)
|
||||
defer client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(webhookConfigName, nil)
|
||||
testWebhook(f)
|
||||
})
|
||||
|
||||
It("Should be able to deny custom resource creation", func() {
|
||||
crdCleanup, dynamicClient := createCRD(f)
|
||||
defer crdCleanup()
|
||||
registerWebhookForCRD(f, context)
|
||||
defer client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(crdWebhookConfigName, nil)
|
||||
testCRDWebhook(f, dynamicClient)
|
||||
})
|
||||
|
||||
It("Should unconditionally reject operations on fail closed webhook", func() {
|
||||
registerFailClosedWebhook(f, context)
|
||||
defer f.ClientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(webhookFailClosedConfigName, nil)
|
||||
testFailClosedWebhook(f)
|
||||
})
|
||||
|
||||
It("Should mutate configmap", func() {
|
||||
registerMutatingWebhookForConfigMap(f, context)
|
||||
defer client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(mutatingWebhookConfigName, nil)
|
||||
testMutatingConfigMapWebhook(f)
|
||||
})
|
||||
|
||||
It("Should mutate crd", func() {
|
||||
crdCleanup, dynamicClient := createCRD(f)
|
||||
defer crdCleanup()
|
||||
registerMutatingWebhookForCRD(f, context)
|
||||
defer client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(crdMutatingWebhookConfigName, nil)
|
||||
testMutatingCRDWebhook(f, dynamicClient)
|
||||
})
|
||||
|
||||
// TODO: add more e2e tests for mutating webhooks
|
||||
// 1. mutating webhook that mutates pod
|
||||
// 2. mutating webhook that sends empty patch
|
||||
// 2.1 and sets status.allowed=true
|
||||
// 2.2 and sets status.allowed=false
|
||||
// 3. mutating webhook that sends patch, but also sets status.allowed=false
|
||||
// 4. mutating webhook that is fail-open vs. fail-closed
|
||||
})
|
||||
|
||||
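// createAuthReaderRoleBinding grants the webhook's default service account read
// access to the extension-apiserver-authentication configmap in kube-system.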
func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
|
||||
By("Create role binding to let webhook read extension-apiserver-authentication")
|
||||
client := f.ClientSet
|
||||
// Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
|
||||
_, err := client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: roleBindingName,
|
||||
Annotations: map[string]string{
|
||||
rbacv1beta1.AutoUpdateAnnotationKey: "true",
|
||||
},
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "",
|
||||
Kind: "Role",
|
||||
Name: "extension-apiserver-authentication-reader",
|
||||
},
|
||||
// Webhook uses the default service account.
|
||||
Subjects: []rbacv1beta1.Subject{
|
||||
{
|
||||
Kind: "ServiceAccount",
|
||||
Name: "default",
|
||||
Namespace: namespace,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil && errors.IsAlreadyExists(err) {
|
||||
framework.Logf("role binding %s already exists", roleBindingName)
|
||||
} else {
|
||||
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
|
||||
}
|
||||
}
|
||||
|
||||
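// deployWebhookAndService creates the TLS secret, the sample webhook deployment
// and the service that the admission webhook configurations point at, then
// waits for all of them to become ready.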
func deployWebhookAndService(f *framework.Framework, image string, context *certContext) {
|
||||
By("Deploying the webhook pod")
|
||||
client := f.ClientSet
|
||||
|
||||
// Creating the secret that contains the webhook's cert.
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
},
|
||||
Type: v1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": context.cert,
|
||||
"tls.key": context.key,
|
||||
},
|
||||
}
|
||||
namespace := f.Namespace.Name
|
||||
_, err := client.CoreV1().Secrets(namespace).Create(secret)
|
||||
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
|
||||
|
||||
// Create the deployment of the webhook
|
||||
podLabels := map[string]string{"app": "sample-webhook", "webhook": "true"}
|
||||
replicas := int32(1)
|
||||
zero := int64(0)
|
||||
mounts := []v1.VolumeMount{
|
||||
{
|
||||
Name: "webhook-certs",
|
||||
ReadOnly: true,
|
||||
MountPath: "/webhook.local.config/certificates",
|
||||
},
|
||||
}
|
||||
volumes := []v1.Volume{
|
||||
{
|
||||
Name: "webhook-certs",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{SecretName: secretName},
|
||||
},
|
||||
},
|
||||
}
|
||||
containers := []v1.Container{
|
||||
{
|
||||
Name: "sample-webhook",
|
||||
VolumeMounts: mounts,
|
||||
Args: []string{
|
||||
"--tls-cert-file=/webhook.local.config/certificates/tls.crt",
|
||||
"--tls-private-key-file=/webhook.local.config/certificates/tls.key",
|
||||
"--alsologtostderr",
|
||||
"-v=4",
|
||||
"2>&1",
|
||||
},
|
||||
Image: image,
|
||||
},
|
||||
}
|
||||
d := &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Type: extensions.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: containers,
|
||||
Volumes: volumes,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
deployment, err := client.ExtensionsV1beta1().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
|
||||
By("Wait for the deployment to be ready")
|
||||
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
|
||||
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
|
||||
err = framework.WaitForDeploymentComplete(client, deployment)
|
||||
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentName, namespace)
|
||||
|
||||
By("Deploying the webhook service")
|
||||
|
||||
serviceLabels := map[string]string{"webhook": "true"}
|
||||
service := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Labels: map[string]string{"test": "webhook"},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: serviceLabels,
|
||||
Ports: []v1.ServicePort{
|
||||
{
|
||||
Protocol: "TCP",
|
||||
Port: 443,
|
||||
TargetPort: intstr.FromInt(443),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().Services(namespace).Create(service)
|
||||
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceName, namespace)
|
||||
|
||||
By("Verifying the service has paired with the endpoint")
|
||||
err = framework.WaitForServiceEndpointsNum(client, namespace, serviceName, 1, 1*time.Second, 30*time.Second)
|
||||
framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceName, 1)
|
||||
}
|
||||
|
||||
func strPtr(s string) *string { return &s }
|
||||
|
||||
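// registerWebhook creates a ValidatingWebhookConfiguration that denies unwanted
// pods and configmaps, plus a fail-open webhook the server cannot reach.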
func registerWebhook(f *framework.Framework, context *certContext) {
|
||||
client := f.ClientSet
|
||||
By("Registering the webhook via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
// A webhook that cannot talk to server, with fail-open policy
|
||||
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
|
||||
policyIgnore := v1beta1.Ignore
|
||||
failOpenHook.FailurePolicy = &policyIgnore
|
||||
|
||||
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: webhookConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "deny-unwanted-pod-container-name-and-label.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"pods"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/pods"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "deny-unwanted-configmap-data.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create, v1beta1.Update},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"configmaps"},
|
||||
},
|
||||
}},
|
||||
// The webhook skips the namespace that has label "skip-webhook-admission":"yes"
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: skipNamespaceLabelKey,
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{skipNamespaceLabelValue},
|
||||
},
|
||||
},
|
||||
},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/configmaps"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
// Server cannot talk to this webhook, so it always fails.
|
||||
// Because this webhook is configured fail-open, the request should be admitted after the call fails.
|
||||
failOpenHook,
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", webhookConfigName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
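// registerMutatingWebhookForConfigMap registers two mutating webhooks that each
// add a stage marker to newly created configmaps.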
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) {
|
||||
client := f.ClientSet
|
||||
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
|
||||
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: mutatingWebhookConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "adding-configmap-data-stage-1.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"configmaps"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-configmaps"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "adding-configmap-data-stage-2.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"configmaps"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-configmaps"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering mutating webhook config %s with namespace %s", mutatingWebhookConfigName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 1s.
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
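// testMutatingConfigMapWebhook creates a configmap and verifies that both
// mutating webhook stages added their expected data keys.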
func testMutatingConfigMapWebhook(f *framework.Framework) {
|
||||
By("create a configmap that should be updated by the webhook")
|
||||
client := f.ClientSet
|
||||
configMap := toBeMutatedConfigMap(f)
|
||||
mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
|
||||
Expect(err).To(BeNil())
|
||||
expectedConfigMapData := map[string]string{
|
||||
"mutation-start": "yes",
|
||||
"mutation-stage-1": "yes",
|
||||
"mutation-stage-2": "yes",
|
||||
}
|
||||
if !reflect.DeepEqual(expectedConfigMapData, mutatedConfigMap.Data) {
|
||||
framework.Failf("\nexpected %#v\n, got %#v\n", expectedConfigMapData, mutatedConfigMap.Data)
|
||||
}
|
||||
}
|
||||
|
||||
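// testWebhook exercises the validating webhook: non-compliant pods and
// configmaps are rejected, compliant configmaps are admitted, and whitelisted
// namespaces bypass the webhook entirely.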
func testWebhook(f *framework.Framework) {
|
||||
By("create a pod that should be denied by the webhook")
|
||||
client := f.ClientSet
|
||||
// Creating the pod, the request should be rejected
|
||||
pod := nonCompliantPod(f)
|
||||
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
Expect(err).NotTo(BeNil())
|
||||
expectedErrMsg1 := "the pod contains unwanted container name"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg1) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
|
||||
}
|
||||
expectedErrMsg2 := "the pod contains unwanted label"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg2) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg2, err.Error())
|
||||
}
|
||||
|
||||
By("create a configmap that should be denied by the webhook")
|
||||
// Creating the configmap, the request should be rejected
|
||||
configmap := nonCompliantConfigMap(f)
|
||||
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
|
||||
Expect(err).NotTo(BeNil())
|
||||
expectedErrMsg := "the configmap contains unwanted key and value"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
|
||||
By("create a configmap that should be admitted by the webhook")
|
||||
// Creating the configmap, the request should be admitted
|
||||
configmap = &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: allowedConfigMapName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"admit": "this",
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
|
||||
toNonCompliantFn := func(cm *v1.ConfigMap) {
|
||||
if cm.Data == nil {
|
||||
cm.Data = map[string]string{}
|
||||
}
|
||||
cm.Data["webhook-e2e-test"] = "webhook-disallow"
|
||||
}
|
||||
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
|
||||
Expect(err).NotTo(BeNil())
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
|
||||
By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
|
||||
patch := nonCompliantConfigMapPatch()
|
||||
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(BeNil())
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
|
||||
By("create a namespace that bypass the webhook")
|
||||
err = createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
|
||||
Name: skippedNamespaceName,
|
||||
Labels: map[string]string{
|
||||
skipNamespaceLabelKey: skipNamespaceLabelValue,
|
||||
},
|
||||
}})
|
||||
framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName)
|
||||
// clean up the namespace
|
||||
defer client.CoreV1().Namespaces().Delete(skippedNamespaceName, nil)
|
||||
|
||||
By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
|
||||
configmap = nonCompliantConfigMap(f)
|
||||
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
|
||||
Expect(err).To(BeNil())
|
||||
}
|
||||
|
||||
// failingWebhook returns a webhook with a rule matching configmap create requests,
|
||||
// but with an invalid client config so that the API server cannot communicate with it.
|
||||
func failingWebhook(namespace, name string) v1beta1.Webhook {
|
||||
return v1beta1.Webhook{
|
||||
Name: name,
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{""},
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"configmaps"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/configmaps"),
|
||||
},
|
||||
// Without CA bundle, the call to webhook always fails
|
||||
CABundle: nil,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
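// registerFailClosedWebhook registers a webhook the API server cannot reach, with FailurePolicy Fail, scoped to namespaces carrying the fail-closed test label.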
func registerFailClosedWebhook(f *framework.Framework, context *certContext) {
|
||||
client := f.ClientSet
|
||||
By("Registering a webhook that server cannot talk to, with fail closed policy, via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
// A webhook that the API server cannot talk to, with fail-closed policy
|
||||
policyFail := v1beta1.Fail
|
||||
hook := failingWebhook(namespace, "fail-closed.k8s.io")
|
||||
hook.FailurePolicy = &policyFail
|
||||
hook.NamespaceSelector = &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: failNamespaceLabelKey,
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{failNamespaceLabelValue},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: webhookFailClosedConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
// Server cannot talk to this webhook, so it always fails.
|
||||
// Because this webhook is configured fail-closed, the request should be rejected after the call fails.
|
||||
hook,
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", webhookFailClosedConfigName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
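// testFailClosedWebhook verifies that creating a configmap in the labeled namespace is rejected with an internal error when the unreachable fail-closed webhook is called.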
func testFailClosedWebhook(f *framework.Framework) {
|
||||
client := f.ClientSet
|
||||
By("create a namespace for the webhook")
|
||||
err := createNamespace(f, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
|
||||
Name: failNamespaceName,
|
||||
Labels: map[string]string{
|
||||
failNamespaceLabelKey: failNamespaceLabelValue,
|
||||
},
|
||||
}})
|
||||
framework.ExpectNoError(err, "creating namespace %q", failNamespaceName)
|
||||
defer client.CoreV1().Namespaces().Delete(failNamespaceName, nil)
|
||||
|
||||
By("create a configmap should be unconditionally rejected by the webhook")
|
||||
configmap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
}
|
||||
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
|
||||
Expect(err).To(HaveOccurred())
|
||||
if !errors.IsInternalError(err) {
|
||||
framework.Failf("expect an internal error, got %#v", err)
|
||||
}
|
||||
}
|
||||
|
||||
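// createNamespace creates the namespace, retrying while a previous namespace of the same name is still being deleted.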
func createNamespace(f *framework.Framework, ns *v1.Namespace) error {
|
||||
return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
_, err := f.ClientSet.CoreV1().Namespaces().Create(ns)
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "object is being deleted:") {
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
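// nonCompliantPod returns a pod whose label and container name the webhook is set up to reject.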
func nonCompliantPod(f *framework.Framework) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: disallowedPodName,
|
||||
Labels: map[string]string{
|
||||
"webhook-e2e-test": "webhook-disallow",
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "webhook-disallow",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
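// nonCompliantConfigMap returns a configmap carrying the key/value pair the webhook disallows.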
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
|
||||
return &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: disallowedConfigMapName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"webhook-e2e-test": "webhook-disallow",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
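// toBeMutatedConfigMap returns a configmap seeded only with the mutation-start key; the mutating webhook stages add the rest.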
func toBeMutatedConfigMap(f *framework.Framework) *v1.ConfigMap {
|
||||
return &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "to-be-mutated",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"mutation-start": "yes",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
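// nonCompliantConfigMapPatch returns a strategic merge patch that introduces the disallowed key/value pair.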
func nonCompliantConfigMapPatch() string {
|
||||
return fmt.Sprint(`{"data":{"webhook-e2e-test":"webhook-disallow"}}`)
|
||||
}
|
||||
|
||||
type updateConfigMapFn func(cm *v1.ConfigMap)
|
||||
|
||||
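// updateConfigMap gets the named configmap, applies the update function, and writes it back, retrying on conflict for up to a minute.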
func updateConfigMap(c clientset.Interface, ns, name string, update updateConfigMapFn) (*v1.ConfigMap, error) {
|
||||
var cm *v1.ConfigMap
|
||||
pollErr := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
var err error
|
||||
if cm, err = c.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
update(cm)
|
||||
if cm, err = c.CoreV1().ConfigMaps(ns).Update(cm); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
// Only retry update on conflict
|
||||
if !errors.IsConflict(err) {
|
||||
return false, err
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
return cm, pollErr
|
||||
}
|
||||
|
||||
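// cleanWebhookTest removes the service, deployment, secret, and role binding created for the webhook server, ignoring delete errors.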
func cleanWebhookTest(client clientset.Interface, namespaceName string) {
|
||||
_ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil)
|
||||
_ = client.ExtensionsV1beta1().Deployments(namespaceName).Delete(deploymentName, nil)
|
||||
_ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil)
|
||||
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete(roleBindingName, nil)
|
||||
}
|
||||
|
||||
// newCRDForAdmissionWebhookTest generates a CRD
|
||||
func newCRDForAdmissionWebhookTest() *apiextensionsv1beta1.CustomResourceDefinition {
|
||||
return &apiextensionsv1beta1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: crdName + "s." + crdAPIGroup},
|
||||
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
|
||||
Group: crdAPIGroup,
|
||||
Version: crdAPIVersion,
|
||||
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
|
||||
Plural: crdName + "s",
|
||||
Singular: crdName,
|
||||
Kind: crdKind,
|
||||
ListKind: crdName + "List",
|
||||
},
|
||||
Scope: apiextensionsv1beta1.NamespaceScoped,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
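// createCRD registers the test CRD and returns a cleanup function plus a dynamic client scoped to the test namespace.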
func createCRD(f *framework.Framework) (func(), dynamic.ResourceInterface) {
|
||||
config, err := framework.LoadConfig()
|
||||
if err != nil {
|
||||
framework.Failf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
apiExtensionClient, err := crdclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
framework.Failf("failed to initialize apiExtensionClient: %v", err)
|
||||
}
|
||||
|
||||
crd := newCRDForAdmissionWebhookTest()
|
||||
|
||||
// Create the CRD and wait for the resource to be recognized and available.
|
||||
dynamicClient, err := testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient, f.ClientPool)
|
||||
if err != nil {
|
||||
framework.Failf("failed to create CustomResourceDefinition: %v", err)
|
||||
}
|
||||
|
||||
resourceClient := dynamicClient.Resource(&metav1.APIResource{
|
||||
Name: crd.Spec.Names.Plural,
|
||||
Namespaced: true,
|
||||
}, f.Namespace.Name)
|
||||
|
||||
return func() {
|
||||
err = testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
|
||||
if err != nil {
|
||||
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
|
||||
}
|
||||
}, resourceClient
|
||||
}
|
||||
|
||||
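// registerWebhookForCRD registers a validating webhook that denies custom resources containing unwanted data.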
func registerWebhookForCRD(f *framework.Framework, context *certContext) {
|
||||
client := f.ClientSet
|
||||
By("Registering the crd webhook via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: crdWebhookConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "deny-unwanted-crd-data.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{crdAPIGroup},
|
||||
APIVersions: []string{crdAPIVersion},
|
||||
Resources: []string{crdName + "s"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/crd"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", webhookConfigName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
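// registerMutatingWebhookForCRD registers a two-stage mutating webhook for the test CRD.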
func registerMutatingWebhookForCRD(f *framework.Framework, context *certContext) {
|
||||
client := f.ClientSet
|
||||
By("Registering the mutating webhook for crd via the AdmissionRegistration API")
|
||||
|
||||
namespace := f.Namespace.Name
|
||||
_, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: crdMutatingWebhookConfigName,
|
||||
},
|
||||
Webhooks: []v1beta1.Webhook{
|
||||
{
|
||||
Name: "mutate-crd-data-stage-1.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{crdAPIGroup},
|
||||
APIVersions: []string{crdAPIVersion},
|
||||
Resources: []string{crdName + "s"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-crd"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "mutate-crd-data-stage-2.k8s.io",
|
||||
Rules: []v1beta1.RuleWithOperations{{
|
||||
Operations: []v1beta1.OperationType{v1beta1.Create},
|
||||
Rule: v1beta1.Rule{
|
||||
APIGroups: []string{crdAPIGroup},
|
||||
APIVersions: []string{crdAPIVersion},
|
||||
Resources: []string{crdName + "s"},
|
||||
},
|
||||
}},
|
||||
ClientConfig: v1beta1.WebhookClientConfig{
|
||||
Service: &v1beta1.ServiceReference{
|
||||
Namespace: namespace,
|
||||
Name: serviceName,
|
||||
Path: strPtr("/mutating-crd"),
|
||||
},
|
||||
CABundle: context.signingCert,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err, "registering crd webhook config %s with namespace %s", crdMutatingWebhookConfigName, namespace)
|
||||
|
||||
// The webhook configuration is honored in 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
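// testCRDWebhook creates a custom resource carrying disallowed data and expects the validating webhook to reject it.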
func testCRDWebhook(f *framework.Framework, crdClient dynamic.ResourceInterface) {
|
||||
By("Creating a custom resource that should be denied by the webhook")
|
||||
crd := newCRDForAdmissionWebhookTest()
|
||||
crInstance := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "cr-instance-1",
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"data": map[string]interface{}{
|
||||
"webhook-e2e-test": "webhook-disallow",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := crdClient.Create(crInstance)
|
||||
Expect(err).NotTo(BeNil())
|
||||
expectedErrMsg := "the custom resource contains unwanted data"
|
||||
if !strings.Contains(err.Error(), expectedErrMsg) {
|
||||
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
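// testMutatingCRDWebhook creates a custom resource and verifies that both mutating stages added their expected data keys.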
func testMutatingCRDWebhook(f *framework.Framework, crdClient dynamic.ResourceInterface) {
|
||||
By("Creating a custom resource that should be mutated by the webhook")
|
||||
crd := newCRDForAdmissionWebhookTest()
|
||||
cr := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": crd.Spec.Names.Kind,
|
||||
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": "cr-instance-1",
|
||||
"namespace": f.Namespace.Name,
|
||||
},
|
||||
"data": map[string]interface{}{
|
||||
"mutation-start": "yes",
|
||||
},
|
||||
},
|
||||
}
|
||||
mutatedCR, err := crdClient.Create(cr)
|
||||
Expect(err).To(BeNil())
|
||||
expectedCRData := map[string]interface{}{
|
||||
"mutation-start": "yes",
|
||||
"mutation-stage-1": "yes",
|
||||
"mutation-stage-2": "yes",
|
||||
}
|
||||
if !reflect.DeepEqual(expectedCRData, mutatedCR.Object["data"]) {
|
||||
framework.Failf("\nexpected %#v\n, got %#v\n", expectedCRData, mutatedCR.Object["data"])
|
||||
}
|
||||
}
|
87
vendor/k8s.io/kubernetes/test/e2e/apps/BUILD
generated
vendored
Normal file
@ -0,0 +1,87 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"cronjob.go",
|
||||
"daemon_restart.go",
|
||||
"daemon_set.go",
|
||||
"deployment.go",
|
||||
"disruption.go",
|
||||
"framework.go",
|
||||
"job.go",
|
||||
"network_partition.go",
|
||||
"rc.go",
|
||||
"replica_set.go",
|
||||
"statefulset.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/apps",
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/batch:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/daemon:go_default_library",
|
||||
"//pkg/controller/deployment/util:go_default_library",
|
||||
"//pkg/controller/job:go_default_library",
|
||||
"//pkg/controller/node:go_default_library",
|
||||
"//pkg/controller/replicaset:go_default_library",
|
||||
"//pkg/controller/replication:go_default_library",
|
||||
"//pkg/kubectl:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/util/pointer:go_default_library",
|
||||
"//plugin/pkg/scheduler/schedulercache:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
7
vendor/k8s.io/kubernetes/test/e2e/apps/OWNERS
generated
vendored
Executable file
@ -0,0 +1,7 @@
|
||||
approvers:
|
||||
- janetkuo
|
||||
- nikhiljindal
|
||||
- kargakis
|
||||
- mfojtik
|
||||
reviewers:
|
||||
- sig-apps-reviewers
|
476
vendor/k8s.io/kubernetes/test/e2e/apps/cronjob.go
generated
vendored
Normal file
@ -0,0 +1,476 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
batchv1beta1 "k8s.io/api/batch/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
|
||||
"k8s.io/kubernetes/pkg/controller/job"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
const (
|
||||
// How long to wait for a cronjob
|
||||
cronJobTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("CronJob", func() {
|
||||
f := framework.NewDefaultFramework("cronjob")
|
||||
|
||||
sleepCommand := []string{"sleep", "300"}
|
||||
|
||||
// Pod will complete instantly
|
||||
successCommand := []string{"/bin/true"}
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResourceBeta, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// multiple jobs running at once
|
||||
It("should schedule multiple jobs concurrently", func() {
|
||||
By("Creating a cronjob")
|
||||
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring more than one job is running at a time")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring at least two running jobs exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
Expect(len(activeJobs) >= 2).To(BeTrue())
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// suspended should not schedule jobs
|
||||
It("should not schedule jobs when suspended [Slow]", func() {
|
||||
By("Creating a suspended cronjob")
|
||||
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
sleepCommand, nil)
|
||||
t := true
|
||||
cronJob.Spec.Suspend = &t
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring no jobs are scheduled")
|
||||
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
By("Ensuring no job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(jobs.Items).To(HaveLen(0))
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// only a single active job is allowed for ForbidConcurrent
|
||||
It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
|
||||
By("Creating a ForbidConcurrent cronjob")
|
||||
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
|
||||
By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
Expect(activeJobs).To(HaveLen(1))
|
||||
|
||||
By("Ensuring no more jobs are scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// only a single active job is allowed for ReplaceConcurrent
|
||||
It("should replace jobs when ReplaceConcurrent", func() {
|
||||
By("Creating a ReplaceConcurrent cronjob")
|
||||
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
|
||||
By("Ensuring exactly one running job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
activeJobs, _ := filterActiveJobs(jobs)
|
||||
Expect(activeJobs).To(HaveLen(1))
|
||||
|
||||
By("Ensuring the job is replaced with a new one")
|
||||
err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// shouldn't give us unexpected warnings
|
||||
It("should not emit unexpected warnings", func() {
|
||||
By("Creating a cronjob")
|
||||
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
nil, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
|
||||
err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring no unexpected event has happened")
|
||||
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// deleted jobs should be removed from the active list
|
||||
It("should remove from active list jobs that have been deleted", func() {
|
||||
By("Creating a ForbidConcurrent cronjob")
|
||||
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
|
||||
sleepCommand, nil)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring a job is scheduled")
|
||||
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring exactly one is scheduled")
|
||||
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(cronJob.Status.Active).Should(HaveLen(1))
|
||||
|
||||
By("Deleting the job")
|
||||
job := cronJob.Status.Active[0]
|
||||
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job was deleted")
|
||||
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(errors.IsNotFound(err)).To(BeTrue())
|
||||
|
||||
By("Ensuring there are no active jobs in the cronjob")
|
||||
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring MissingJob event has occured")
|
||||
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// cleanup of successful finished jobs, with limit of one successful job
|
||||
It("should delete successful finished jobs with limit of one successful job", func() {
|
||||
By("Creating a AllowConcurrent cronjob with custom history limits")
|
||||
successLimit := int32(1)
|
||||
cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
|
||||
successCommand, &successLimit)
|
||||
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Job is going to complete instantly: do not check for an active job
|
||||
// as we are most likely to miss it
|
||||
|
||||
By("Ensuring a finished job exists")
|
||||
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring a finished job exists by listing jobs explicitly")
|
||||
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, finishedJobs := filterActiveJobs(jobs)
|
||||
Expect(len(finishedJobs) == 1).To(BeTrue())
|
||||
|
||||
// Job should get deleted when the next job finishes the next minute
|
||||
By("Ensuring this job does not exist anymore")
|
||||
err = waitForJobNotExist(f.ClientSet, f.Namespace.Name, finishedJobs[0])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring there is 1 finished job by listing jobs explicitly")
|
||||
jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, finishedJobs = filterActiveJobs(jobs)
|
||||
Expect(len(finishedJobs) == 1).To(BeTrue())
|
||||
|
||||
By("Removing cronjob")
|
||||
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
// newTestCronJob returns a cronjob which does one of several testing behaviors.
|
||||
func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.ConcurrencyPolicy,
|
||||
command []string, successfulJobsHistoryLimit *int32) *batchv1beta1.CronJob {
|
||||
parallelism := int32(1)
|
||||
completions := int32(1)
|
||||
sj := &batchv1beta1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "CronJob",
|
||||
},
|
||||
Spec: batchv1beta1.CronJobSpec{
|
||||
Schedule: schedule,
|
||||
ConcurrencyPolicy: concurrencyPolicy,
|
||||
JobTemplate: batchv1beta1.JobTemplateSpec{
|
||||
Spec: batchv1.JobSpec{
|
||||
Parallelism: &parallelism,
|
||||
Completions: &completions,
|
||||
Template: v1.PodTemplateSpec{
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "data",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c",
|
||||
Image: "busybox",
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: "/data",
|
||||
Name: "data",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
sj.Spec.SuccessfulJobsHistoryLimit = successfulJobsHistoryLimit
|
||||
if command != nil {
|
||||
sj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = command
|
||||
}
|
||||
return sj
|
||||
}
|
||||
|
||||
func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
|
||||
return c.BatchV1beta1().CronJobs(ns).Create(cronJob)
|
||||
}
|
||||
|
||||
func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) {
|
||||
return c.BatchV1beta1().CronJobs(ns).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
func deleteCronJob(c clientset.Interface, ns, name string) error {
|
||||
return c.BatchV1beta1().CronJobs(ns).Delete(name, nil)
|
||||
}
|
||||
|
||||
// Wait for at least the given number of active jobs.
|
||||
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(curr.Status.Active) >= active, nil
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for jobs to appear in the active list of a cronjob or not.
|
||||
// When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after
|
||||
// the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still
|
||||
// empty after the timeout.
|
||||
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1beta1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if failIfNonEmpty {
|
||||
return len(curr.Status.Active) == 0, nil
|
||||
} else {
|
||||
return len(curr.Status.Active) != 0, nil
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for a job to not exist by listing jobs explicitly.
|
||||
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, finishedJobs := filterActiveJobs(jobs)
|
||||
for _, job := range finishedJobs {
|
||||
if targetJob.Namespace == job.Namespace && targetJob.Name == job.Name {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for a job to be replaced with a new one.
|
||||
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Ignore Jobs pending deletion, since deletion of Jobs is now asynchronous.
|
||||
aliveJobs := filterNotDeletedJobs(jobs)
|
||||
if len(aliveJobs) > 1 {
|
||||
return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
|
||||
} else if len(aliveJobs) == 0 {
|
||||
framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
|
||||
return false, nil
|
||||
}
|
||||
return aliveJobs[0].Name != previousJobName, nil
|
||||
})
|
||||
}
|
||||
|
||||
// waitForJobsAtLeast waits for at least a number of jobs to appear.
|
||||
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(jobs.Items) >= atLeast, nil
|
||||
})
|
||||
}
|
||||
|
||||
// waitForAnyFinishedJob waits for any completed job to appear.
|
||||
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
|
||||
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
|
||||
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for i := range jobs.Items {
|
||||
if job.IsJobFinished(&jobs.Items[i]) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// checkNoEventWithReason checks that no event with a reason from the given list has occurred.
|
||||
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
|
||||
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
|
||||
}
|
||||
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error in listing events: %s", err)
|
||||
}
|
||||
for _, e := range events.Items {
|
||||
for _, reason := range reasons {
|
||||
if e.Reason == reason {
|
||||
return fmt.Errorf("Found event with reason %s: %#v", reason, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// filterNotDeletedJobs returns the job list without any jobs that are pending
|
||||
// deletion.
|
||||
func filterNotDeletedJobs(jobs *batchv1.JobList) []*batchv1.Job {
|
||||
var alive []*batchv1.Job
|
||||
for i := range jobs.Items {
|
||||
job := &jobs.Items[i]
|
||||
if job.DeletionTimestamp == nil {
|
||||
alive = append(alive, job)
|
||||
}
|
||||
}
|
||||
return alive
|
||||
}
|
||||
|
||||
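// filterActiveJobs splits the job list into jobs that are still active and jobs that have finished.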
func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job, finished []*batchv1.Job) {
|
||||
for i := range jobs.Items {
|
||||
j := jobs.Items[i]
|
||||
if !job.IsJobFinished(&j) {
|
||||
active = append(active, &j)
|
||||
} else {
|
||||
finished = append(finished, &j)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
315
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_restart.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// This test primarily checks 2 things:
|
||||
// 1. Daemons restart automatically within some sane time (10m).
|
||||
// 2. They don't take abnormal actions when restarted in the steady state.
|
||||
// - Controller manager shouldn't overshoot replicas
|
||||
// - Kubelet shouldn't restart containers
|
||||
// - Scheduler should continue assigning hosts to new pods
|
||||
|
||||
const (
|
||||
restartPollInterval = 5 * time.Second
|
||||
restartTimeout = 10 * time.Minute
|
||||
numPods = 10
|
||||
sshPort = 22
|
||||
ADD = "ADD"
|
||||
DEL = "DEL"
|
||||
UPDATE = "UPDATE"
|
||||
)
|
||||
|
||||
// restartDaemonConfig is a config to restart a running daemon on a node, and wait till
|
||||
// it comes back up. It uses ssh to send a SIGTERM to the daemon.
|
||||
type restartDaemonConfig struct {
|
||||
nodeName string
|
||||
daemonName string
|
||||
healthzPort int
|
||||
pollInterval time.Duration
|
||||
pollTimeout time.Duration
|
||||
}
|
||||
|
||||
// NewRestartConfig creates a restartDaemonConfig for the given node and daemon.
|
||||
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
|
||||
if !framework.ProviderIs("gce") {
|
||||
framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
|
||||
}
|
||||
return &restartDaemonConfig{
|
||||
nodeName: nodeName,
|
||||
daemonName: daemonName,
|
||||
healthzPort: healthzPort,
|
||||
pollInterval: pollInterval,
|
||||
pollTimeout: pollTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *restartDaemonConfig) String() string {
|
||||
return fmt.Sprintf("Daemon %v on node %v", r.daemonName, r.nodeName)
|
||||
}
|
||||
|
||||
// waitUp polls the daemon's /healthz endpoint until it returns HTTP 200 or the polling hits the pollTimeout.
|
||||
func (r *restartDaemonConfig) waitUp() {
|
||||
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
|
||||
healthzCheck := fmt.Sprintf(
|
||||
"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
|
||||
|
||||
err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
|
||||
result, err := framework.NodeExec(r.nodeName, healthzCheck)
|
||||
framework.ExpectNoError(err)
|
||||
if result.Code == 0 {
|
||||
httpCode, err := strconv.Atoi(result.Stdout)
|
||||
if err != nil {
|
||||
framework.Logf("Unable to parse healthz http return code: %v", err)
|
||||
} else if httpCode == 200 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
|
||||
r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
|
||||
return false, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout)
|
||||
}
|
||||
|
||||
// kill sends a SIGTERM to the daemon
|
||||
func (r *restartDaemonConfig) kill() {
|
||||
framework.Logf("Killing %v", r)
|
||||
_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
// Restart checks if the daemon is up, kills it, and waits till it comes back up
|
||||
func (r *restartDaemonConfig) restart() {
|
||||
r.waitUp()
|
||||
r.kill()
|
||||
r.waitUp()
|
||||
}
|
||||
|
||||
// podTracker records a serial history of events that might have affected pods.
|
||||
type podTracker struct {
|
||||
cache.ThreadSafeStore
|
||||
}
|
||||
|
||||
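// remember records the event for the pod, skipping steady-state updates of pods that are already running.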
func (p *podTracker) remember(pod *v1.Pod, eventType string) {
|
||||
if eventType == UPDATE && pod.Status.Phase == v1.PodRunning {
|
||||
return
|
||||
}
|
||||
p.Add(fmt.Sprintf("[%v] %v: %v", time.Now(), eventType, pod.Name), pod)
|
||||
}
|
||||
|
||||
func (p *podTracker) String() (msg string) {
|
||||
for _, k := range p.ListKeys() {
|
||||
obj, exists := p.Get(k)
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
pod := obj.(*v1.Pod)
|
||||
msg += fmt.Sprintf("%v Phase %v Host %v\n", k, pod.Status.Phase, pod.Spec.NodeName)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
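// newPodTracker returns a podTracker backed by an empty thread-safe store.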
func newPodTracker() *podTracker {
|
||||
return &podTracker{cache.NewThreadSafeStore(
|
||||
cache.Indexers{}, cache.Indices{})}
|
||||
}
|
||||
|
||||
// replacePods replaces content of the store with the given pods.
|
||||
func replacePods(pods []*v1.Pod, store cache.Store) {
|
||||
found := make([]interface{}, 0, len(pods))
|
||||
for i := range pods {
|
||||
found = append(found, pods[i])
|
||||
}
|
||||
framework.ExpectNoError(store.Replace(found, "0"))
|
||||
}
|
||||
|
||||
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
|
||||
// and a list of node names across which these containers restarted.
|
||||
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
|
||||
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options)
|
||||
framework.ExpectNoError(err)
|
||||
failedContainers := 0
|
||||
containerRestartNodes := sets.NewString()
|
||||
for _, p := range pods.Items {
|
||||
for _, v := range testutils.FailedContainers(&p) {
|
||||
failedContainers = failedContainers + v.Restarts
|
||||
containerRestartNodes.Insert(p.Spec.NodeName)
|
||||
}
|
||||
}
|
||||
return failedContainers, containerRestartNodes.List()
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("daemonrestart")
|
||||
rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(uuid.NewUUID())
|
||||
labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
|
||||
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
var ns string
|
||||
var config testutils.RCConfig
|
||||
var controller cache.Controller
|
||||
var newPods cache.Store
|
||||
var stopCh chan struct{}
|
||||
var tracker *podTracker
|
||||
|
||||
BeforeEach(func() {
|
||||
// These tests require SSH
|
||||
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
|
||||
ns = f.Namespace.Name
|
||||
|
||||
// All the restart tests need an rc and a watch on pods of the rc.
|
||||
// Additionally some of them might scale the rc during the test.
|
||||
config = testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
InternalClient: f.InternalClientset,
|
||||
Name: rcName,
|
||||
Namespace: ns,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Replicas: numPods,
|
||||
CreatedPods: &[]*v1.Pod{},
|
||||
}
|
||||
Expect(framework.RunRC(config)).NotTo(HaveOccurred())
|
||||
replacePods(*config.CreatedPods, existingPods)
|
||||
|
||||
stopCh = make(chan struct{})
|
||||
tracker = newPodTracker()
|
||||
newPods, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.LabelSelector = labelSelector.String()
|
||||
obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.LabelSelector = labelSelector.String()
|
||||
return f.ClientSet.CoreV1().Pods(ns).Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Pod{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
tracker.remember(obj.(*v1.Pod), ADD)
|
||||
},
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
tracker.remember(newObj.(*v1.Pod), UPDATE)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
tracker.remember(obj.(*v1.Pod), DEL)
|
||||
},
|
||||
},
|
||||
)
|
||||
go controller.Run(stopCh)
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
close(stopCh)
|
||||
})
|
||||
|
||||
It("Controller Manager should not create/delete replicas across restart", func() {
|
||||
|
||||
// Requires master ssh access.
|
||||
framework.SkipUnlessProviderIs("gce", "aws")
|
||||
restarter := NewRestartConfig(
|
||||
framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
|
||||
restarter.restart()
|
||||
|
||||
// The intent is to ensure the replication controller manager has observed and reported status of
|
||||
// the replication controller at least once since the manager restarted, so that we can determine
|
||||
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
|
||||
// to the same size achieves this, because the scale operation advances the RC's sequence number
|
||||
// and awaits it to be observed and reported back in the RC's status.
|
||||
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)
|
||||
|
||||
// Only check the keys, the pods can be different if the kubelet updated it.
|
||||
// TODO: Can it really?
|
||||
existingKeys := sets.NewString()
|
||||
newKeys := sets.NewString()
|
||||
for _, k := range existingPods.ListKeys() {
|
||||
existingKeys.Insert(k)
|
||||
}
|
||||
for _, k := range newPods.ListKeys() {
|
||||
newKeys.Insert(k)
|
||||
}
|
||||
if len(newKeys.List()) != len(existingKeys.List()) ||
|
||||
!newKeys.IsSuperset(existingKeys) {
|
||||
framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
|
||||
}
|
||||
})
|
||||
|
||||
It("Scheduler should continue assigning pods to nodes across restart", func() {
|
||||
|
||||
// Requires master ssh access.
|
||||
framework.SkipUnlessProviderIs("gce", "aws")
|
||||
restarter := NewRestartConfig(
|
||||
framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)
|
||||
|
||||
// Create pods while the scheduler is down and make sure the scheduler picks them up by
|
||||
// scaling the rc to the same size.
|
||||
restarter.waitUp()
|
||||
restarter.kill()
|
||||
// This is best effort to try and create pods while the scheduler is down,
|
||||
// since we don't know exactly when it is restarted after the kill signal.
|
||||
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
|
||||
restarter.waitUp()
|
||||
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
|
||||
})
|
||||
|
||||
It("Kubelet should not restart containers across restart", func() {
|
||||
|
||||
nodeIPs, err := framework.GetNodePublicIps(f.ClientSet)
|
||||
framework.ExpectNoError(err)
|
||||
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
|
||||
if preRestarts != 0 {
|
||||
framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
|
||||
}
|
||||
for _, ip := range nodeIPs {
|
||||
restarter := NewRestartConfig(
|
||||
ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout)
|
||||
restarter.restart()
|
||||
}
|
||||
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
|
||||
if postRestarts != preRestarts {
|
||||
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
|
||||
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
|
||||
}
|
||||
})
|
||||
})
|
928
vendor/k8s.io/kubernetes/test/e2e/apps/daemon_set.go
generated
vendored
Normal file
@ -0,0 +1,928 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/daemon"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
// this should not be a multiple of 5, because node status updates
|
||||
// every 5 seconds. See https://github.com/kubernetes/kubernetes/pull/14915.
|
||||
dsRetryPeriod = 1 * time.Second
|
||||
dsRetryTimeout = 5 * time.Minute
|
||||
|
||||
daemonsetLabelPrefix = "daemonset-"
|
||||
daemonsetNameLabel = daemonsetLabelPrefix + "name"
|
||||
daemonsetColorLabel = daemonsetLabelPrefix + "color"
|
||||
)
|
||||
|
||||
// This test must be run in serial because it assumes the Daemon Set pods will
|
||||
// always get scheduled. If we run other tests in parallel, this may not
|
||||
// happen. In the future, running in parallel may work if we have an eviction
|
||||
// model which lets the DS controller kick out other pods to make room.
|
||||
// See http://issues.k8s.io/21767 for more details
|
||||
var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
var f *framework.Framework
|
||||
|
||||
AfterEach(func() {
|
||||
// Clean up
|
||||
daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to dump DaemonSets")
|
||||
if daemonsets != nil && len(daemonsets.Items) > 0 {
|
||||
for _, ds := range daemonsets.Items {
|
||||
By(fmt.Sprintf("Deleting DaemonSet %q with reaper", ds.Name))
|
||||
dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = dsReaper.Stop(f.Namespace.Name, ds.Name, 0, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, &ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
|
||||
}
|
||||
}
|
||||
if daemonsets, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
f = framework.NewDefaultFramework("daemonsets")
|
||||
|
||||
image := framework.ServeHostnameImage
|
||||
dsName := "daemon-set"
|
||||
|
||||
var ns string
|
||||
var c clientset.Interface
|
||||
|
||||
BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
c = f.ClientSet
|
||||
err := clearDaemonSetNodeLabels(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should run and stop simple daemon", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Stop a daemon pod, check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
It("should run and stop complex daemon", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node selector", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Update the node label to green, and wait for daemons to be unscheduled")
|
||||
nodeSelector[daemonsetColorLabel] = "green"
|
||||
greenNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
|
||||
Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
|
||||
By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
|
||||
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
|
||||
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred(), "error patching daemon set")
|
||||
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{greenNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should run and stop complex daemon with node affinity", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node affinity", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
|
||||
NodeSelectorTerms: []v1.NodeSelectorTerm{
|
||||
{
|
||||
MatchExpressions: []v1.NodeSelectorRequirement{
|
||||
{
|
||||
Key: daemonsetColorLabel,
|
||||
Operator: v1.NodeSelectorOpIn,
|
||||
Values: []string{nodeSelector[daemonsetColorLabel]},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Initially, daemon pods should not be running on any nodes.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
|
||||
|
||||
By("Change node label to blue, check that daemon pod is launched.")
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
|
||||
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
|
||||
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
|
||||
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
|
||||
Expect(len(daemonSetLabels)).To(Equal(1))
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, ds, []string{newNode.Name}))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Remove the node label and wait for daemons to be unscheduled")
|
||||
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
|
||||
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
|
||||
Expect(wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, ds))).
|
||||
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
|
||||
})
|
||||
|
||||
It("should retry creating failed daemon pods", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
err = checkDaemonStatus(f, dsName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.")
|
||||
podList := listDaemonPods(c, ns, label)
|
||||
pod := podList.Items[0]
|
||||
pod.ResourceVersion = ""
|
||||
pod.Status.Phase = v1.PodFailed
|
||||
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
|
||||
Expect(err).NotTo(HaveOccurred(), "error failing a daemon pod")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
|
||||
})
|
||||
|
||||
It("Should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(1)))
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
By("Make sure all daemon pods have correct template generation 1")
|
||||
templateGeneration := "1"
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, "1")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
first := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
firstHash := first.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(first.Revision).To(Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(int64(2)))
|
||||
|
||||
By("Check that daemon pods images aren't updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, image, 0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Make sure all daemon pods have correct template generation 1")
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, templateGeneration)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
Expect(cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]).NotTo(Equal(firstHash))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), firstHash, templateGeneration)
|
||||
})
|
||||
|
||||
It("Should update pod when spec was updated and update strategy is RollingUpdate", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
templateGeneration := int64(999)
|
||||
framework.Logf("Creating simple daemon set %s with templateGeneration %d", dsName, templateGeneration)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.TemplateGeneration = templateGeneration
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 1)
|
||||
cur := curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(1)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
|
||||
By("Update daemon pods image.")
|
||||
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, RedisImage)
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
templateGeneration++
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
By("Check that daemon pods images are updated.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, ds, RedisImage, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Make sure all daemon pods have correct template generation %d", templateGeneration))
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Check that daemon pods are still running on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
// Check history and labels
|
||||
ds, err = c.ExtensionsV1beta1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
waitForHistoryCreated(c, ns, label, 2)
|
||||
cur = curHistory(listDaemonHistories(c, ns, label), ds)
|
||||
hash = cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
Expect(cur.Revision).To(Equal(int64(2)))
|
||||
checkDaemonSetPodsLabels(listDaemonPods(c, ns, label), hash, fmt.Sprint(templateGeneration))
|
||||
})
|
||||
|
||||
It("Should adopt existing pods when creating a RollingUpdate DaemonSet regardless of templateGeneration", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
// 1. Create a RollingUpdate DaemonSet
|
||||
templateGeneration := int64(999)
|
||||
framework.Logf("Creating simple RollingUpdate DaemonSet %s with templateGeneration %d", dsName, templateGeneration)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.TemplateGeneration = templateGeneration
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(ds.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster.")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Make sure all daemon pods have correct template generation %d", templateGeneration)
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(templateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 2. Orphan DaemonSet pods
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", dsName)
|
||||
deleteDaemonSetAndOrphan(c, ds)
|
||||
|
||||
// 3. Adopt DaemonSet pods (no restart)
|
||||
newDSName := "adopt"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newDSName)
|
||||
newDS := newDaemonSet(newDSName, image, label)
|
||||
newDS.Spec.TemplateGeneration = templateGeneration
|
||||
newDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newDS.Spec.Template, ds.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newDS.Name)
|
||||
waitDaemonSetAdoption(c, newDS, ds.Name, templateGeneration)
|
||||
|
||||
// 4. Orphan DaemonSet pods again
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newDSName)
|
||||
deleteDaemonSetAndOrphan(c, newDS)
|
||||
|
||||
// 5. Adopt DaemonSet pods (no restart) as long as template matches, even when templateGeneration doesn't match
|
||||
newAdoptDSName := "adopt-template-matches"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
|
||||
newAdoptDS := newDaemonSet(newAdoptDSName, image, label)
|
||||
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(int64(1)))
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).NotTo(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).To(BeTrue(), "DaemonSet template should match to adopt pods")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
|
||||
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
|
||||
|
||||
// 6. Orphan DaemonSet pods again
|
||||
framework.Logf("Deleting DaemonSet %s and orphaning its pods and history", newAdoptDSName)
|
||||
deleteDaemonSetAndOrphan(c, newAdoptDS)
|
||||
|
||||
// 7. Adopt DaemonSet pods (no restart) as long as templateGeneration matches, even when template doesn't match
|
||||
newAdoptDSName = "adopt-template-generation-matches"
|
||||
framework.Logf("Creating a new RollingUpdate DaemonSet %s to adopt pods", newAdoptDSName)
|
||||
newAdoptDS = newDaemonSet(newAdoptDSName, image, label)
|
||||
newAdoptDS.Spec.Template.Spec.Containers[0].Name = "not-match"
|
||||
newAdoptDS.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
newAdoptDS.Spec.TemplateGeneration = templateGeneration
|
||||
newAdoptDS, err = c.ExtensionsV1beta1().DaemonSets(ns).Create(newAdoptDS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newAdoptDS.Spec.TemplateGeneration).To(Equal(templateGeneration))
|
||||
Expect(apiequality.Semantic.DeepEqual(newAdoptDS.Spec.Template, newDS.Spec.Template)).NotTo(BeTrue(), "DaemonSet template should not match")
|
||||
|
||||
framework.Logf("Wait for pods and history to be adopted by DaemonSet %s", newAdoptDS.Name)
|
||||
waitDaemonSetAdoption(c, newAdoptDS, ds.Name, templateGeneration)
|
||||
})
|
||||
|
||||
It("Should rollback without unnecessary restarts", func() {
|
||||
// Skip clusters with only one node, where we cannot have half-done DaemonSet rollout for this test
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
|
||||
framework.Logf("Create a RollingUpdate DaemonSet")
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = extensions.DaemonSetUpdateStrategy{Type: extensions.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.ExtensionsV1beta1().DaemonSets(ns).Create(ds)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Update the DaemonSet to trigger a rollout")
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
newImage := "foo:non-existent"
|
||||
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = newImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Make sure we're in the middle of a rollout
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkAtLeastOneNewPod(c, ns, label, newImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
var existingPods, newPods []*v1.Pod
|
||||
for i := range pods.Items {
|
||||
pod := pods.Items[i]
|
||||
image := pod.Spec.Containers[0].Image
|
||||
switch image {
|
||||
case ds.Spec.Template.Spec.Containers[0].Image:
|
||||
existingPods = append(existingPods, &pod)
|
||||
case newDS.Spec.Template.Spec.Containers[0].Image:
|
||||
newPods = append(newPods, &pod)
|
||||
default:
|
||||
framework.Failf("unexpected pod found, image = %s", image)
|
||||
}
|
||||
}
|
||||
Expect(len(existingPods)).NotTo(Equal(0))
|
||||
Expect(len(newPods)).NotTo(Equal(0))
|
||||
|
||||
framework.Logf("Roll back the DaemonSet before rollout is complete")
|
||||
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *extensions.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = image
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure DaemonSet rollback is complete")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// After rollback is done, compare current pods with previous old pods during rollout, to make sure they're not restarted
|
||||
pods = listDaemonPods(c, ns, label)
|
||||
rollbackPods := map[string]bool{}
|
||||
for _, pod := range pods.Items {
|
||||
rollbackPods[pod.Name] = true
|
||||
}
|
||||
for _, pod := range existingPods {
|
||||
Expect(rollbackPods[pod.Name]).To(BeTrue(), fmt.Sprintf("pod %s was unexpectedly restarted", pod.Name))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
// getDaemonSetImagePatch generates a patch for updating a DaemonSet's container image
|
||||
func getDaemonSetImagePatch(containerName, containerImage string) string {
|
||||
return fmt.Sprintf(`{"spec":{"template":{"spec":{"containers":[{"name":"%s","image":"%s"}]}}}}`, containerName, containerImage)
|
||||
}
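// For illustration, a hypothetical call getDaemonSetImagePatch("app", "redis:new")
// (container name "app" matches newDaemonSet below; the image value is made up)
// yields the strategic-merge patch
//
//	{"spec":{"template":{"spec":{"containers":[{"name":"app","image":"redis:new"}]}}}}
//
// which the tests above apply via
// c.ExtensionsV1beta1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch)).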
|
||||
|
||||
// deleteDaemonSetAndOrphan deletes the given DaemonSet and orphans all its dependents.
|
||||
// It also checks that all dependents are orphaned, and the DaemonSet is deleted.
|
||||
func deleteDaemonSetAndOrphan(c clientset.Interface, ds *extensions.DaemonSet) {
|
||||
trueVar := true
|
||||
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(ds.UID))
|
||||
err := c.ExtensionsV1beta1().DaemonSets(ds.Namespace).Delete(ds.Name, deleteOptions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be orphaned")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryOrphaned(c, ds.Namespace, ds.Spec.Template.Labels))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be orphaned")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetDeleted(c, ds.Namespace, ds.Name))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet to be deleted")
|
||||
}
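// The adoption test above relies on this helper: the DaemonSet is deleted with
// OrphanDependents=true so its pods and ControllerRevisions keep running unowned,
// and a newly created DaemonSet with a matching template (or matching
// templateGeneration) can then adopt them without restarting any pod.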
|
||||
|
||||
func newDaemonSet(dsName, image string, label map[string]string) *extensions.DaemonSet {
|
||||
return &extensions.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: dsName,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: label,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "app",
|
||||
Image: image,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(podList.Items)).To(BeNumerically(">", 0))
|
||||
return podList
|
||||
}
|
||||
|
||||
func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, map[string]string) {
|
||||
daemonSetLabels := map[string]string{}
|
||||
otherLabels := map[string]string{}
|
||||
for k, v := range labels {
|
||||
if strings.HasPrefix(k, daemonsetLabelPrefix) {
|
||||
daemonSetLabels[k] = v
|
||||
} else {
|
||||
otherLabels[k] = v
|
||||
}
|
||||
}
|
||||
return daemonSetLabels, otherLabels
|
||||
}
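// As a sketch of the split performed above (the non-test label is illustrative):
// given node labels {"daemonset-color": "blue", "zone": "us-east"}, the function
// returns ({"daemonset-color": "blue"}, {"zone": "us-east"}), since only keys with
// the "daemonset-" prefix are treated as labels owned by these tests.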
|
||||
|
||||
func clearDaemonSetNodeLabels(c clientset.Interface) error {
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(c)
|
||||
for _, node := range nodeList.Items {
|
||||
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
|
||||
nodeClient := c.CoreV1().Nodes()
|
||||
var newNode *v1.Node
|
||||
var newLabels map[string]string
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
|
||||
node, err := nodeClient.Get(nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// remove all labels this test is creating
|
||||
daemonSetLabels, otherLabels := separateDaemonSetNodeLabels(node.Labels)
|
||||
if reflect.DeepEqual(daemonSetLabels, labels) {
|
||||
newNode = node
|
||||
return true, nil
|
||||
}
|
||||
node.Labels = otherLabels
|
||||
for k, v := range labels {
|
||||
node.Labels[k] = v
|
||||
}
|
||||
newNode, err = nodeClient.Update(node)
|
||||
if err == nil {
|
||||
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
|
||||
return true, err
|
||||
}
|
||||
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
framework.Logf("failed to update node due to resource version conflict")
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if len(newLabels) != len(labels) {
|
||||
return nil, fmt.Errorf("could not set daemon set test labels as expected")
|
||||
}
|
||||
|
||||
return newNode, nil
|
||||
}
|
||||
|
||||
func checkDaemonPodOnNodes(f *framework.Framework, ds *extensions.DaemonSet, nodeNames []string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("could not get the pod list: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
pods := podList.Items
|
||||
|
||||
nodesToPodCount := make(map[string]int)
|
||||
for _, pod := range pods {
|
||||
if !metav1.IsControlledBy(&pod, ds) {
|
||||
continue
|
||||
}
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
if podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
nodesToPodCount[pod.Spec.NodeName] += 1
|
||||
}
|
||||
}
|
||||
framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
|
||||
|
||||
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
|
||||
for _, nodeName := range nodeNames {
|
||||
if nodesToPodCount[nodeName] != 1 {
|
||||
framework.Logf("Node %s is running more than one daemon pod", nodeName)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
|
||||
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
|
||||
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
|
||||
// other nodes.
|
||||
return len(nodesToPodCount) == len(nodeNames), nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkRunningOnAllNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
nodeNames := schedulableNodes(f.ClientSet, ds)
|
||||
return checkDaemonPodOnNodes(f, ds, nodeNames)()
|
||||
}
|
||||
}
|
||||
|
||||
func schedulableNodes(c clientset.Interface, ds *extensions.DaemonSet) []string {
|
||||
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
nodeNames := make([]string, 0)
|
||||
for _, node := range nodeList.Items {
|
||||
if !canScheduleOnNode(node, ds) {
|
||||
framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
|
||||
continue
|
||||
}
|
||||
nodeNames = append(nodeNames, node.Name)
|
||||
}
|
||||
return nodeNames
|
||||
}
|
||||
|
||||
func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]string, newImage string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.Spec.Containers[0].Image == newImage {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
|
||||
func canScheduleOnNode(node v1.Node, ds *extensions.DaemonSet) bool {
|
||||
newPod := daemon.NewPod(ds, node.Name)
|
||||
nodeInfo := schedulercache.NewNodeInfo()
|
||||
nodeInfo.SetNode(&node)
|
||||
fit, _, err := daemon.Predicates(newPod, nodeInfo)
|
||||
if err != nil {
|
||||
framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
|
||||
return false
|
||||
}
|
||||
return fit
|
||||
}
|
||||
|
||||
func checkRunningOnNoNodes(f *framework.Framework, ds *extensions.DaemonSet) func() (bool, error) {
|
||||
return checkDaemonPodOnNodes(f, ds, make([]string, 0))
|
||||
}
|
||||
|
||||
func checkDaemonStatus(f *framework.Framework, dsName string) error {
|
||||
ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Could not get daemon set from v1.")
|
||||
}
|
||||
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
|
||||
if desired != scheduled && desired != ready {
|
||||
return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *extensions.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
pods := podList.Items
|
||||
|
||||
unavailablePods := 0
|
||||
nodesToUpdatedPodCount := make(map[string]int)
|
||||
for _, pod := range pods {
|
||||
if !metav1.IsControlledBy(&pod, ds) {
|
||||
continue
|
||||
}
|
||||
podImage := pod.Spec.Containers[0].Image
|
||||
if podImage != image {
|
||||
framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
|
||||
} else {
|
||||
nodesToUpdatedPodCount[pod.Spec.NodeName] += 1
|
||||
}
|
||||
if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
framework.Logf("Pod %s is not available", pod.Name)
|
||||
unavailablePods++
|
||||
}
|
||||
}
|
||||
if unavailablePods > maxUnavailable {
|
||||
return false, fmt.Errorf("number of unavailable pods: %d is greater than maxUnavailable: %d", unavailablePods, maxUnavailable)
|
||||
}
|
||||
// Make sure every daemon pod on the node has been updated
|
||||
nodeNames := schedulableNodes(c, ds)
|
||||
for _, node := range nodeNames {
|
||||
if nodesToUpdatedPodCount[node] == 0 {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonPodsTemplateGeneration(c clientset.Interface, ns string, label map[string]string, templateGeneration string) error {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
// We don't care about inactive pods
|
||||
if !controller.IsPodActive(&pod) {
|
||||
continue
|
||||
}
|
||||
podTemplateGeneration := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
|
||||
if podTemplateGeneration != templateGeneration {
|
||||
return fmt.Errorf("expected pod %s/%s template generation %s, but got %s", pod.Namespace, pod.Name, templateGeneration, podTemplateGeneration)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonSetDeleted(c clientset.Interface, ns, name string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
_, err := c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
|
||||
if !apierrs.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
// This pod is orphaned only when controller ref is cleared
|
||||
if controllerRef := metav1.GetControllerOf(&pod); controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetHistoryOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
histories := listDaemonHistories(c, ns, label)
|
||||
for _, history := range histories.Items {
|
||||
// This history is orphaned only when controller ref is cleared
|
||||
if controllerRef := metav1.GetControllerOf(&history); controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
// This pod is adopted only when its controller ref is updated
|
||||
if controllerRef := metav1.GetControllerOf(&pod); controllerRef == nil || controllerRef.UID != dsUID {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func checkDaemonSetHistoryAdopted(c clientset.Interface, ns string, dsUID types.UID, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
histories := listDaemonHistories(c, ns, label)
|
||||
for _, history := range histories.Items {
|
||||
// This history is adopted only when its controller ref is updated
|
||||
if controllerRef := metav1.GetControllerOf(&history); controllerRef == nil || controllerRef.UID != dsUID {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func waitDaemonSetAdoption(c clientset.Interface, ds *extensions.DaemonSet, podPrefix string, podTemplateGeneration int64) {
|
||||
ns := ds.Namespace
|
||||
label := ds.Spec.Template.Labels
|
||||
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetPodsAdopted(c, ns, ds.UID, label))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet pods to be adopted")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonSetHistoryAdopted(c, ns, ds.UID, label))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for DaemonSet history to be adopted")
|
||||
|
||||
framework.Logf("Make sure no daemon pod updated its template generation %d", podTemplateGeneration)
|
||||
err = checkDaemonPodsTemplateGeneration(c, ns, label, fmt.Sprint(podTemplateGeneration))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure no pods are recreated by looking at their names")
|
||||
err = checkDaemonSetPodsName(c, ns, podPrefix, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsName(c clientset.Interface, ns, prefix string, label map[string]string) error {
|
||||
pods := listDaemonPods(c, ns, label)
|
||||
for _, pod := range pods.Items {
|
||||
if !strings.HasPrefix(pod.Name, prefix) {
|
||||
return fmt.Errorf("expected pod %s name to be prefixed %q", pod.Name, prefix)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDaemonSetPodsLabels(podList *v1.PodList, hash, templateGeneration string) {
|
||||
for _, pod := range podList.Items {
|
||||
podHash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
podTemplate := pod.Labels[extensions.DaemonSetTemplateGenerationKey]
|
||||
Expect(len(podHash)).To(BeNumerically(">", 0))
|
||||
if len(hash) > 0 {
|
||||
Expect(podHash).To(Equal(hash))
|
||||
}
|
||||
Expect(len(podTemplate)).To(BeNumerically(">", 0))
|
||||
Expect(podTemplate).To(Equal(templateGeneration))
|
||||
}
|
||||
}
|
||||
|
||||
func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]string, numHistory int) {
|
||||
listHistoryFn := func() (bool, error) {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(historyList.Items) == numHistory {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
|
||||
return false, nil
|
||||
}
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for controllerrevisions to be created")
|
||||
}
|
||||
|
||||
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *apps.ControllerRevisionList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
historyList, err := c.AppsV1beta1().ControllerRevisions(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(historyList.Items)).To(BeNumerically(">", 0))
|
||||
return historyList
|
||||
}
|
||||
|
||||
func curHistory(historyList *apps.ControllerRevisionList, ds *extensions.DaemonSet) *apps.ControllerRevision {
|
||||
var curHistory *apps.ControllerRevision
|
||||
foundCurHistories := 0
|
||||
for i := range historyList.Items {
|
||||
history := &historyList.Items[i]
|
||||
// Every history should have the hash label
|
||||
Expect(len(history.Labels[extensions.DefaultDaemonSetUniqueLabelKey])).To(BeNumerically(">", 0))
|
||||
match, err := daemon.Match(ds, history)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if match {
|
||||
curHistory = history
|
||||
foundCurHistories++
|
||||
}
|
||||
}
|
||||
Expect(foundCurHistories).To(Equal(1))
|
||||
Expect(curHistory).NotTo(BeNil())
|
||||
return curHistory
|
||||
}
|
826
vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go
generated
vendored
Normal file
@@ -0,0 +1,826 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutil "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
dRetryPeriod = 2 * time.Second
|
||||
dRetryTimeout = 5 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
nilRs *extensions.ReplicaSet
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Deployment", func() {
|
||||
var ns string
|
||||
var c clientset.Interface
|
||||
|
||||
AfterEach(func() {
|
||||
failureTrap(c, ns)
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("deployment")
|
||||
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
It("deployment reaping should cascade to its replica sets and pods", func() {
|
||||
testDeleteDeployment(f)
|
||||
})
|
||||
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
|
||||
testRollingUpdateDeployment(f)
|
||||
})
|
||||
It("RecreateDeployment should delete old pods and create new ones", func() {
|
||||
testRecreateDeployment(f)
|
||||
})
|
||||
It("deployment should delete old replica sets", func() {
|
||||
testDeploymentCleanUpPolicy(f)
|
||||
})
|
||||
It("deployment should support rollover", func() {
|
||||
testRolloverDeployment(f)
|
||||
})
|
||||
It("deployment should support rollback", func() {
|
||||
testRollbackDeployment(f)
|
||||
})
|
||||
It("iterative rollouts should eventually progress", func() {
|
||||
testIterativeDeployments(f)
|
||||
})
|
||||
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
|
||||
testDeploymentsControllerRef(f)
|
||||
})
|
||||
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues
|
||||
// See https://github.com/kubernetes/kubernetes/issues/29229
|
||||
})
|
||||
|
||||
func failureTrap(c clientset.Interface, ns string) {
|
||||
deployments, err := c.ExtensionsV1beta1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
return
|
||||
}
|
||||
for i := range deployments.Items {
|
||||
d := deployments.Items[i]
|
||||
|
||||
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
|
||||
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.ExtensionsV1beta1())
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
|
||||
return
|
||||
}
|
||||
testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
|
||||
rsList := allOldRSs
|
||||
if newRS != nil {
|
||||
rsList = append(rsList, newRS)
|
||||
}
|
||||
testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
|
||||
}
|
||||
// We need to print all the ReplicaSets if no Deployment object was created
|
||||
if len(deployments.Items) != 0 {
|
||||
return
|
||||
}
|
||||
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
return
|
||||
}
|
||||
for _, rs := range rss.Items {
|
||||
framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
|
||||
}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
|
||||
for _, pod := range podList.Items {
|
||||
framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func intOrStrP(num int) *intstr.IntOrString {
|
||||
intstr := intstr.FromInt(num)
|
||||
return &intstr
|
||||
}
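// intOrStrP is used below when configuring the rollover deployment's strategy, e.g.:
//
//	newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
//		MaxUnavailable: intOrStrP(0),
//		MaxSurge:       intOrStrP(1),
//	}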
|
||||
|
||||
func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensions.DeploymentRollback {
|
||||
return &extensions.DeploymentRollback{
|
||||
Name: name,
|
||||
UpdatedAnnotations: annotations,
|
||||
RollbackTo: extensions.RollbackConfig{Revision: revision},
|
||||
}
|
||||
}
|
||||
|
||||
func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting deployment %s", deploymentName)
|
||||
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
|
||||
err = reaper.Stop(ns, deployment.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
_, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(errors.IsNotFound(err)).To(BeTrue())
|
||||
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(rss.Items).Should(HaveLen(0))
|
||||
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
var pods *v1.PodList
|
||||
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Pods may be created by overlapping deployments right after this deployment is deleted, ignore them
|
||||
if len(pods.Items) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
|
||||
}
|
||||
}
|
||||
|
||||
func testDeleteDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
internalClient := f.InternalClientset
|
||||
|
||||
deploymentName := "test-new-deployment"
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
framework.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", NginxImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(newRS).NotTo(Equal(nilRs))
|
||||
stopDeployment(c, internalClient, ns, deploymentName)
|
||||
}
|
||||
|
||||
func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
// Create nginx pods.
|
||||
deploymentPodLabels := map[string]string{"name": "sample-pod"}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": "sample-pod",
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
|
||||
rsName := "test-rolling-update-controller"
|
||||
replicas := int32(1)
|
||||
rsRevision := "3546343826724305832"
|
||||
annotations := make(map[string]string)
|
||||
annotations[deploymentutil.RevisionAnnotation] = rsRevision
|
||||
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
|
||||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %s", err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// There should be 1 old RS (nginx-controller, which is adopted)
|
||||
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(allOldRSs)).Should(Equal(1))
|
||||
// The old RS should contain pod-template-hash in its selector, label, and template label
|
||||
Expect(len(allOldRSs[0].Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
|
||||
Expect(len(allOldRSs[0].Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
|
||||
Expect(len(allOldRSs[0].Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey])).Should(BeNumerically(">", 0))
|
||||
}
|
||||
|
||||
func testRecreateDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
// Create a deployment that brings up redis pods.
|
||||
deploymentName := "test-recreate-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, extensions.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
Expect(framework.WatchRecreateDeployment(c, deployment)).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy
|
||||
func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
// Create nginx pods.
|
||||
deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": "cleanup-pod",
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
rsName := "test-cleanup-controller"
|
||||
replicas := int32(1)
|
||||
revisionHistoryLimit := utilpointer.Int32Ptr(0)
|
||||
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, "cleanup-pod", false, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-cleanup-deployment"
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
|
||||
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
|
||||
|
||||
options := metav1.ListOptions{
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
}
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
w, err := c.CoreV1().Pods(ns).Watch(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
go func() {
|
||||
// There should be only one pod being created, which is the pod with the redis image.
|
||||
// The old RS shouldn't create a new pod when the deployment controller adds the pod-template-hash label to its selector.
|
||||
numPodCreation := 1
|
||||
for {
|
||||
select {
|
||||
case event, _ := <-w.ResultChan():
|
||||
if event.Type != watch.Added {
|
||||
continue
|
||||
}
|
||||
numPodCreation--
|
||||
if numPodCreation < 0 {
|
||||
framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
|
||||
}
|
||||
pod, ok := event.Object.(*v1.Pod)
|
||||
if !ok {
|
||||
framework.Failf("Expect event Object to be a pod")
|
||||
}
|
||||
if pod.Spec.Containers[0].Name != RedisImageName {
|
||||
framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
|
||||
}
|
||||
case <-stopCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
d := framework.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
|
||||
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
|
||||
err = framework.WaitForDeploymentOldRSsNum(c, ns, deploymentName, int(*revisionHistoryLimit))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
// testRolloverDeployment tests that deployment supports rollover.
|
||||
// i.e., we can change the desired state and kick off a rolling update, then change the desired state again before it finishes.
|
||||
func testRolloverDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
podName := "rollover-pod"
|
||||
deploymentPodLabels := map[string]string{"name": podName}
|
||||
rsPodLabels := map[string]string{
|
||||
"name": podName,
|
||||
"pod": NginxImageName,
|
||||
}
|
||||
|
||||
rsName := "test-rollover-controller"
|
||||
rsReplicas := int32(1)
|
||||
_, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, NginxImageName, NginxImage))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
err = framework.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Wait for replica set to become ready before adopting it.
|
||||
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
Expect(framework.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(HaveOccurred())
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
|
||||
// We use a nonexistent image here to make sure the rollout never finishes.
|
||||
deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
|
||||
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
newDeployment := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
|
||||
MaxUnavailable: intOrStrP(0),
|
||||
MaxSurge: intOrStrP(1),
|
||||
}
|
||||
newDeployment.Spec.MinReadySeconds = int32(10)
|
||||
_, err = c.ExtensionsV1beta1().Deployments(ns).Create(newDeployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
err = framework.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
Expect(err).NotTo(HaveOccurred())
|
||||
// Check if it's updated to revision 1 correctly
|
||||
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.ExtensionsV1beta1())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(newRS, int32(1))
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
// Not really the new replica set anymore but we GET by name so that's fine.
|
||||
newRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
ensureReplicas(newRS, int32(0))
|
||||
}
|
||||
|
||||
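// ensureReplicas asserts that both the desired (spec) and observed (status) replica counts of the given ReplicaSet equal the expected value.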
func ensureReplicas(rs *extensions.ReplicaSet, replicas int32) {
|
||||
Expect(*rs.Spec.Replicas).Should(Equal(replicas))
|
||||
Expect(rs.Status.Replicas).Should(Equal(replicas))
|
||||
}
|
||||
|
||||
// testRollbackDeployment tests that a deployment is created (revision 1) and updated (revision 2),
// then rolled back to revision 1 (this reverts the template to revision 1's and records it as revision 3),
// and then rolled back to the last revision (revision 4, which reuses revision 2's template).
// Rolling the deployment back to revision 10 (which doesn't exist in history) should then fail.
// Finally, rolling the current deployment (revision 4) back to revision 4 should be a no-op.
|
||||
func testRollbackDeployment(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
podName := "nginx"
|
||||
deploymentPodLabels := map[string]string{"name": podName}
|
||||
|
||||
// 1. Create a deployment to create nginx pods.
|
||||
deploymentName, deploymentImageName := "test-rollback-deployment", NginxImageName
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := extensions.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
d := framework.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
d.Annotations = createAnnotation
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "create"
|
||||
err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 2. Update the deployment to create redis pods.
|
||||
updatedDeploymentImage := RedisImage
|
||||
updatedDeploymentImageName := RedisImageName
|
||||
updateAnnotation := map[string]string{"action": "update", "log": "I need to update it"}
|
||||
deployment, err := framework.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
|
||||
update.Annotations = updateAnnotation
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
err = framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "update"
|
||||
err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 3. Update the deploymentRollback to rollback to revision 1
|
||||
revision := int64(1)
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback := newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// TODO: report RollbackDone in deployment status and check it here
|
||||
|
||||
// Wait for it to be updated to revision 3
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3", deploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "create", after the rollback
|
||||
err = framework.CheckNewRSAnnotations(c, ns, deploymentName, createAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 4. Update the deploymentRollback to rollback to last revision
|
||||
revision = 0
|
||||
framework.Logf("rolling back deployment %s to last revision", deploymentName)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 4
|
||||
err = framework.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Current newRS annotation should be "update", after the rollback
|
||||
err = framework.CheckNewRSAnnotations(c, ns, deploymentName, updateAnnotation)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 5. Update the deploymentRollback to rollback to revision 10
|
||||
// Since there's no revision 10 in history, it should stay as revision 4
|
||||
revision = 10
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// TODO: report RollbackRevisionNotFound in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since there's no revision 10
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// 6. Update the deploymentRollback to rollback to revision 4
|
||||
// Since it's already revision 4, it should be no-op
|
||||
revision = 4
|
||||
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
|
||||
rollback = newDeploymentRollback(deploymentName, nil, revision)
|
||||
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for the deployment to start rolling back
|
||||
err = framework.WaitForDeploymentRollbackCleared(c, ns, deploymentName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
// TODO: report RollbackTemplateUnchanged in deployment status and check it here
|
||||
|
||||
// The pod template shouldn't change since it's already revision 4
|
||||
// Check if it's still revision 4 and still has the old pod template
|
||||
err = framework.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "4", updatedDeploymentImage)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
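// randomScale randomly mutates the deployment's replica count: roughly a 30% chance to scale up by one, a 30% chance to scale down by one (never below 1), and otherwise no change.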
func randomScale(d *extensions.Deployment, i int) {
|
||||
switch r := rand.Float32(); {
|
||||
case r < 0.3:
|
||||
framework.Logf("%02d: scaling up", i)
|
||||
*(d.Spec.Replicas)++
|
||||
case r < 0.6:
|
||||
if *(d.Spec.Replicas) > 1 {
|
||||
framework.Logf("%02d: scaling down", i)
|
||||
*(d.Spec.Replicas)--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testIterativeDeployments(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(6)
|
||||
zero := int64(0)
|
||||
two := int32(2)
|
||||
|
||||
// Create a nginx deployment.
|
||||
deploymentName := "nginx"
|
||||
thirty := int32(30)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
d.Spec.ProgressDeadlineSeconds = &thirty
|
||||
d.Spec.RevisionHistoryLimit = &two
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
iterations := 20
|
||||
for i := 0; i < iterations; i++ {
|
||||
if r := rand.Float32(); r < 0.6 {
|
||||
time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
|
||||
}
|
||||
|
||||
switch n := rand.Float32(); {
|
||||
case n < 0.2:
|
||||
// trigger a new deployment
|
||||
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
|
||||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
rollbackTo := &extensions.RollbackConfig{Revision: 0}
|
||||
update.Spec.RollbackTo = rollbackTo
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
case n < 0.6:
|
||||
// just scaling
|
||||
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
case n < 0.8:
|
||||
// toggling the deployment
|
||||
if !deployment.Spec.Paused {
|
||||
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
randomScale(update, i)
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
default:
|
||||
// arbitrarily delete deployment pods
|
||||
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(opts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if len(podList.Items) == 0 {
|
||||
framework.Logf("%02d: no deployment pods to delete", i)
|
||||
continue
|
||||
}
|
||||
for p := range podList.Items {
|
||||
if rand.Float32() < 0.5 {
|
||||
continue
|
||||
}
|
||||
name := podList.Items[p].Name
|
||||
framework.Logf("%02d: deleting deployment pod %q", i, name)
|
||||
err := c.CoreV1().Pods(ns).Delete(name, nil)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unpause the deployment if we ended up pausing it
|
||||
deployment, err = c.ExtensionsV1beta1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if deployment.Spec.Paused {
|
||||
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
})
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
|
||||
Expect(framework.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for deployment %q status", deploymentName)
|
||||
Expect(framework.WaitForDeploymentComplete(c, deployment)).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
ns := f.Namespace.Name
|
||||
c := f.ClientSet
|
||||
|
||||
deploymentName := "test-orphan-deployment"
|
||||
framework.Logf("Creating Deployment %q", deploymentName)
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
d := framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Checking its ReplicaSet has the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSets", deploymentName)
|
||||
err = orphanDeploymentReplicaSets(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for the ReplicaSet to be orphaned")
|
||||
err = wait.Poll(dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
|
||||
Expect(err).NotTo(HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
|
||||
|
||||
deploymentName = "test-adopt-deployment"
|
||||
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = framework.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, extensions.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.ExtensionsV1beta1().Deployments(ns).Create(d)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForDeploymentComplete(c, deploy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
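// checkDeploymentReplicaSetsControllerRef verifies that every ReplicaSet matching the given labels is controlled by the deployment with the given UID.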
func checkDeploymentReplicaSetsControllerRef(c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
|
||||
rsList := listDeploymentReplicaSets(c, ns, label)
|
||||
for _, rs := range rsList.Items {
|
||||
// This rs is adopted only when its controllerRef is updated.
|
||||
if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid {
|
||||
return fmt.Errorf("ReplicaSet %s has unexpected controllerRef %v", rs.Name, controllerRef)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
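// waitDeploymentReplicaSetsOrphaned returns a poll condition that reports true once none of the ReplicaSets matching the given labels has a controller reference.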
func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func() (bool, error) {
|
||||
return func() (bool, error) {
|
||||
rsList := listDeploymentReplicaSets(c, ns, label)
|
||||
for _, rs := range rsList.Items {
|
||||
// This rs is orphaned only when controller ref is cleared
|
||||
if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
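// listDeploymentReplicaSets lists the ReplicaSets matching the given labels and fails the test if the call errors or returns an empty list.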
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *extensions.ReplicaSetList {
|
||||
selector := labels.Set(label).AsSelector()
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(options)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(rsList.Items)).To(BeNumerically(">", 0))
|
||||
return rsList
|
||||
}
|
||||
|
||||
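// orphanDeploymentReplicaSets deletes the deployment with OrphanDependents set so its ReplicaSets are left behind, using a UID precondition to avoid deleting a recreated object.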
func orphanDeploymentReplicaSets(c clientset.Interface, d *extensions.Deployment) error {
|
||||
trueVar := true
|
||||
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
|
||||
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
|
||||
return c.ExtensionsV1beta1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
|
||||
}
|
335
vendor/k8s.io/kubernetes/test/e2e/apps/disruption.go
generated
vendored
Normal file
@ -0,0 +1,335 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
policy "k8s.io/api/policy/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// schedulingTimeout is deliberately long: sometimes we have to wait a while to be
// confident that something ordinary, such as a pod getting scheduled and becoming
// Ready, has had enough time to happen.
|
||||
const (
|
||||
bigClusterSize = 7
|
||||
schedulingTimeout = 10 * time.Minute
|
||||
timeout = 60 * time.Second
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("DisruptionController", func() {
|
||||
f := framework.NewDefaultFramework("disruption")
|
||||
var ns string
|
||||
var cs kubernetes.Interface
|
||||
|
||||
BeforeEach(func() {
|
||||
cs = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
It("should create a PodDisruptionBudget", func() {
|
||||
createPDBMinAvailableOrDie(cs, ns, intstr.FromString("1%"))
|
||||
})
|
||||
|
||||
It("should update PodDisruptionBudget status", func() {
|
||||
createPDBMinAvailableOrDie(cs, ns, intstr.FromInt(2))
|
||||
|
||||
createPodsOrDie(cs, ns, 3)
|
||||
waitForPodsOrDie(cs, ns, 3)
|
||||
|
||||
// Since disruptionAllowed starts out 0, if we see it ever become positive,
|
||||
// that means the controller is working.
|
||||
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
|
||||
pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return pdb.Status.PodDisruptionsAllowed > 0, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
evictionCases := []struct {
|
||||
description string
|
||||
minAvailable intstr.IntOrString
|
||||
maxUnavailable intstr.IntOrString
|
||||
podCount int
|
||||
replicaSetSize int32
|
||||
shouldDeny bool
|
||||
exclusive bool
|
||||
skipForBigClusters bool
|
||||
}{
|
||||
{
|
||||
description: "no PDB",
|
||||
minAvailable: intstr.FromString(""),
|
||||
maxUnavailable: intstr.FromString(""),
|
||||
podCount: 1,
|
||||
shouldDeny: false,
|
||||
}, {
|
||||
description: "too few pods, absolute",
|
||||
minAvailable: intstr.FromInt(2),
|
||||
maxUnavailable: intstr.FromString(""),
|
||||
podCount: 2,
|
||||
shouldDeny: true,
|
||||
}, {
|
||||
description: "enough pods, absolute",
|
||||
minAvailable: intstr.FromInt(2),
|
||||
maxUnavailable: intstr.FromString(""),
|
||||
podCount: 3,
|
||||
shouldDeny: false,
|
||||
}, {
|
||||
description: "enough pods, replicaSet, percentage",
|
||||
minAvailable: intstr.FromString("90%"),
|
||||
maxUnavailable: intstr.FromString(""),
|
||||
replicaSetSize: 10,
|
||||
exclusive: false,
|
||||
shouldDeny: false,
|
||||
}, {
|
||||
description: "too few pods, replicaSet, percentage",
|
||||
minAvailable: intstr.FromString("90%"),
|
||||
maxUnavailable: intstr.FromString(""),
|
||||
replicaSetSize: 10,
|
||||
exclusive: true,
|
||||
shouldDeny: true,
|
||||
// This test assumes that there are fewer than replicaSetSize nodes in the cluster.
|
||||
skipForBigClusters: true,
|
||||
},
|
||||
{
|
||||
description: "maxUnavailable allow single eviction, percentage",
|
||||
minAvailable: intstr.FromString(""),
|
||||
maxUnavailable: intstr.FromString("10%"),
|
||||
replicaSetSize: 10,
|
||||
exclusive: false,
|
||||
shouldDeny: false,
|
||||
},
|
||||
{
|
||||
description: "maxUnavailable deny evictions, integer",
|
||||
minAvailable: intstr.FromString(""),
|
||||
maxUnavailable: intstr.FromInt(1),
|
||||
replicaSetSize: 10,
|
||||
exclusive: true,
|
||||
shouldDeny: true,
|
||||
// This test assumes that there are fewer than replicaSetSize nodes in the cluster.
|
||||
skipForBigClusters: true,
|
||||
},
|
||||
}
|
||||
for i := range evictionCases {
|
||||
c := evictionCases[i]
|
||||
expectation := "should allow an eviction"
|
||||
if c.shouldDeny {
|
||||
expectation = "should not allow an eviction"
|
||||
}
|
||||
It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
|
||||
if c.skipForBigClusters {
|
||||
framework.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
|
||||
}
|
||||
createPodsOrDie(cs, ns, c.podCount)
|
||||
if c.replicaSetSize > 0 {
|
||||
createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive)
|
||||
}
|
||||
|
||||
if c.minAvailable.String() != "" {
|
||||
createPDBMinAvailableOrDie(cs, ns, c.minAvailable)
|
||||
}
|
||||
|
||||
if c.maxUnavailable.String() != "" {
|
||||
createPDBMaxUnavailableOrDie(cs, ns, c.maxUnavailable)
|
||||
}
|
||||
|
||||
// Locate a running pod.
|
||||
var pod v1.Pod
|
||||
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
|
||||
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
for i := range podList.Items {
|
||||
if podList.Items[i].Status.Phase == v1.PodRunning {
|
||||
pod = podList.Items[i]
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
e := &policy.Eviction{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: pod.Name,
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
|
||||
if c.shouldDeny {
|
||||
// Since disruptionAllowed starts out false, wait at least 60s hoping that
|
||||
// this gives the controller enough time to have truly set the status.
|
||||
time.Sleep(timeout)
|
||||
|
||||
err = cs.CoreV1().Pods(ns).Evict(e)
|
||||
Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
|
||||
} else {
|
||||
// Only wait for running pods in the "allow" case
|
||||
// because one of shouldDeny cases relies on the
|
||||
// replicaSet not fitting on the cluster.
|
||||
waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize))
|
||||
|
||||
// Since disruptionAllowed starts out false, if an eviction is ever allowed,
|
||||
// that means the controller is working.
|
||||
err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
|
||||
err = cs.CoreV1().Pods(ns).Evict(e)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
} else {
|
||||
return true, nil
|
||||
}
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
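// createPDBMinAvailableOrDie creates a PodDisruptionBudget named "foo" selecting foo=bar pods with the given minAvailable, failing the test on error.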
func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
|
||||
pdb := policy.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: policy.PodDisruptionBudgetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
MinAvailable: &minAvailable,
|
||||
},
|
||||
}
|
||||
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
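// createPDBMaxUnavailableOrDie creates a PodDisruptionBudget named "foo" selecting foo=bar pods with the given maxUnavailable, failing the test on error.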
func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
|
||||
pdb := policy.PodDisruptionBudget{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: policy.PodDisruptionBudgetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
MaxUnavailable: &maxUnavailable,
|
||||
},
|
||||
}
|
||||
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
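// createPodsOrDie creates n standalone pods labeled foo=bar in the given namespace, failing the test if any creation errors.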
func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
|
||||
for i := 0; i < n; i++ {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("pod-%d", i),
|
||||
Namespace: ns,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "busybox",
|
||||
Image: "gcr.io/google_containers/echoserver:1.6",
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
},
|
||||
}
|
||||
|
||||
_, err := cs.CoreV1().Pods(ns).Create(pod)
|
||||
framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
|
||||
}
|
||||
}
|
||||
|
||||
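// waitForPodsOrDie waits until at least n foo=bar pods in the namespace are Running, failing the test on timeout or error.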
func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
|
||||
By("Waiting for all pods to be running")
|
||||
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
|
||||
pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if pods == nil {
|
||||
return false, fmt.Errorf("pods is nil")
|
||||
}
|
||||
if len(pods.Items) < n {
|
||||
framework.Logf("pods: %v < %v", len(pods.Items), n)
|
||||
return false, nil
|
||||
}
|
||||
ready := 0
|
||||
for i := 0; i < n; i++ {
|
||||
if pods.Items[i].Status.Phase == v1.PodRunning {
|
||||
ready++
|
||||
}
|
||||
}
|
||||
if ready < n {
|
||||
framework.Logf("running pods: %v < %v", ready, n)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
|
||||
}
|
||||
|
||||
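// createReplicaSetOrDie creates a ReplicaSet of the given size; when exclusive is true each pod requests host port 5555 so at most one replica fits per node.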
func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclusive bool) {
|
||||
container := v1.Container{
|
||||
Name: "busybox",
|
||||
Image: "gcr.io/google_containers/echoserver:1.6",
|
||||
}
|
||||
if exclusive {
|
||||
container.Ports = []v1.ContainerPort{
|
||||
{HostPort: 5555, ContainerPort: 5555},
|
||||
}
|
||||
}
|
||||
|
||||
rs := &extensions.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "rs",
|
||||
Namespace: ns,
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Replicas: &size,
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{container},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err := cs.Extensions().ReplicaSets(ns).Create(rs)
|
||||
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
|
||||
}
|
23
vendor/k8s.io/kubernetes/test/e2e/apps/framework.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import "github.com/onsi/ginkgo"
|
||||
|
||||
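// SIGDescribe wraps ginkgo.Describe and prefixes the description with the [sig-apps] tag.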
func SIGDescribe(text string, body func()) bool {
|
||||
return ginkgo.Describe("[sig-apps] "+text, body)
|
||||
}
|
194
vendor/k8s.io/kubernetes/test/e2e/apps/job.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
|
||||
"k8s.io/kubernetes/pkg/kubectl"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Job", func() {
|
||||
f := framework.NewDefaultFramework("job")
|
||||
parallelism := int32(2)
|
||||
completions := int32(4)
|
||||
backoffLimit := int32(6) // default value
|
||||
|
||||
// Simplest case: all pods succeed promptly
|
||||
It("should run a job to completion when tasks succeed", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Pods sometimes fail, but eventually succeed.
|
||||
It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
|
||||
By("Creating a job")
|
||||
// One failure, then a success, local restarts.
|
||||
// We can't use the random failure approach used by the
|
||||
// non-local test below, because kubelet will throttle
|
||||
// frequently failing containers in a given pod, ramping
|
||||
// up to 5 minutes between restarts, making test timeouts
|
||||
// due to successive failures too likely with a reasonable
|
||||
// test timeout.
|
||||
job := framework.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
// Pods sometimes fail, but eventually succeed, after pod restarts
|
||||
It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
|
||||
By("Creating a job")
|
||||
// 50% chance of container success, no local restarts.
|
||||
// Can't use the failOnce approach because that relies
|
||||
// on an emptyDir, which is not preserved across new pods.
|
||||
// Worst case analysis: 15 failures, each taking 1 minute to
|
||||
// run due to some slowness, 1 in 2^15 chance of happening,
|
||||
// causing test flake. Should be very rare.
|
||||
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, 999)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should exceed active deadline", func() {
|
||||
By("Creating a job")
|
||||
var activeDeadlineSeconds int64 = 1
|
||||
job := framework.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Ensuring job past active deadline")
|
||||
err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should delete a job", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring active pods == parallelism")
|
||||
err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("delete a job")
|
||||
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
timeout := 1 * time.Minute
|
||||
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, metav1.NewDeleteOptions(0))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job was deleted")
|
||||
_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(errors.IsNotFound(err)).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should adopt matching orphans and release non-matching pods", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
|
||||
// Replace job with the one returned from Create() so it has the UID.
|
||||
// Save Kind since it won't be populated in the returned job.
|
||||
kind := job.Kind
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
job.Kind = kind
|
||||
|
||||
By("Ensuring active pods == parallelism")
|
||||
err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Orphaning one of the Job's Pods")
|
||||
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pods.Items).To(HaveLen(int(parallelism)))
|
||||
pod := pods.Items[0]
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.OwnerReferences = nil
|
||||
})
|
||||
|
||||
By("Checking that the Job readopts the Pod")
|
||||
Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", framework.JobTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef == nil {
|
||||
return false, nil
|
||||
}
|
||||
if controllerRef.Kind != job.Kind || controllerRef.Name != job.Name || controllerRef.UID != job.UID {
|
||||
return false, fmt.Errorf("pod has wrong controllerRef: got %v, want %v", controllerRef, job)
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be readopted", pod.Name)
|
||||
|
||||
By("Removing the labels from the Job's Pod")
|
||||
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
|
||||
pod.Labels = nil
|
||||
})
|
||||
|
||||
By("Checking that the Job releases the Pod")
|
||||
Expect(framework.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", framework.JobTimeout,
|
||||
func(pod *v1.Pod) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(pod)
|
||||
if controllerRef != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
)).To(Succeed(), "wait for pod %q to be released", pod.Name)
|
||||
})
|
||||
|
||||
It("should exceed backoffLimit", func() {
|
||||
By("Creating a job")
|
||||
job := framework.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, 0)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Ensuring job exceed backofflimit")
|
||||
|
||||
err = framework.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, framework.JobTimeout, "BackoffLimitExceeded")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking that only one pod created and status is failed")
|
||||
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(pods.Items).To(HaveLen(1))
|
||||
pod := pods.Items[0]
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodFailed))
|
||||
})
|
||||
})
|
639
vendor/k8s.io/kubernetes/test/e2e/apps/network_partition.go
generated
vendored
Normal file
@ -0,0 +1,639 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
nodepkg "k8s.io/kubernetes/pkg/controller/node"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
podReadyTimeout = 2 * time.Minute
|
||||
podNotReadyTimeout = 1 * time.Minute
|
||||
nodeReadinessTimeout = 3 * time.Minute
|
||||
resizeNodeReadyTimeout = 2 * time.Minute
|
||||
)
|
||||
|
||||
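// expectNodeReadiness watches node updates from the newNode channel and fails the test if the node's Ready condition does not reach the expected state within nodeReadinessTimeout.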
func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
|
||||
timeout := false
|
||||
expected := false
|
||||
timer := time.After(nodeReadinessTimeout)
|
||||
for !expected && !timeout {
|
||||
select {
|
||||
case n := <-newNode:
|
||||
if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
|
||||
expected = true
|
||||
} else {
|
||||
framework.Logf("Observed node ready status is NOT %v as expected", isReady)
|
||||
}
|
||||
case <-timer:
|
||||
timeout = true
|
||||
}
|
||||
}
|
||||
if !expected {
|
||||
framework.Failf("Failed to observe node ready status change to %v", isReady)
|
||||
}
|
||||
}
|
||||
|
||||
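// podOnNode returns a pod spec pinned to the given node (via NodeName) that runs the given image, exposes port 9376, and never restarts.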
func podOnNode(podName, nodeName string, image string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{
|
||||
"name": podName,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: podName,
|
||||
Image: image,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
|
||||
},
|
||||
},
|
||||
NodeName: nodeName,
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
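// newPodOnNode creates a serve-hostname pod on the named node and logs whether the creation succeeded.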
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
|
||||
pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
|
||||
if err == nil {
|
||||
framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
|
||||
} else {
|
||||
framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
f := framework.NewDefaultFramework("network-partition")
|
||||
var systemPodsNo int32
|
||||
var c clientset.Interface
|
||||
var ns string
|
||||
ignoreLabels := framework.ImagePullerLabels
|
||||
var group string
|
||||
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
systemPodsNo = int32(len(systemPods))
|
||||
|
||||
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
|
||||
framework.SkipUnlessProviderIs("gke", "aws")
|
||||
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
|
||||
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
|
||||
} else {
|
||||
group = framework.TestContext.CloudConfig.NodeInstanceGroup
|
||||
}
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Pods", func() {
|
||||
Context("should return to running and ready state after network partition is healed", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
})
|
||||
|
||||
// What happens in this test:
|
||||
// Network traffic from a node to master is cut off to simulate network partition
|
||||
// Expect to observe:
|
||||
// 1. Node is marked NotReady after timeout by nodecontroller (40 seconds)
|
||||
// 2. All pods on node are marked NotReady shortly after #1
|
||||
// 3. Node and pods return to Ready after connectivity recovers
|
||||
It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts metav1.ListOptions
|
||||
nodeOpts := metav1.ListOptions{}
|
||||
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
|
||||
return false
|
||||
}
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
if err != nil || len(pods.Items) <= 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if len(nodes.Items) <= 0 {
|
||||
framework.Failf("No eligible node were found: %d", len(nodes.Items))
|
||||
}
|
||||
node := nodes.Items[0]
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
|
||||
By("Set up watch on node status")
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
|
||||
stopCh := make(chan struct{})
|
||||
newNode := make(chan *v1.Node)
|
||||
var controller cache.Controller
|
||||
_, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
obj, err := f.ClientSet.CoreV1().Nodes().List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
return f.ClientSet.CoreV1().Nodes().Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Node{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
newNode <- n
|
||||
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
defer func() {
|
||||
// Will not explicitly close newNode channel here due to
|
||||
// race condition where stopCh and newNode are closed but informer onUpdate still executes.
|
||||
close(stopCh)
|
||||
}()
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
host := framework.GetNodeExternalIP(&node)
|
||||
master := framework.GetMasterAddress(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
framework.UnblockNetwork(host, master)
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
|
||||
By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
|
||||
expectNodeReadiness(true, newNode)
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
}()
|
||||
|
||||
framework.BlockNetwork(host, master)
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[ReplicationController]", func() {
|
||||
It("should recreate pods scheduled on the unreachable node "+
|
||||
"AND allow scheduling of pods on a node after it rejoins the cluster", func() {
|
||||
|
||||
// Create a replication controller for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
|
||||
name := "my-hostname-net"
|
||||
common.NewSVCByName(c, ns, name)
|
||||
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
|
||||
common.NewRCByName(c, ns, name, replicas, nil)
|
||||
err := framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that 'podNameToDisappear',
|
||||
// that belongs to replication controller 'rcName', really disappeared (because its
|
||||
// grace period is set to 0).
|
||||
// Finally, it checks that the replication controller recreates the
|
||||
// pods on another node and that the number of replicas is now equal to 'replicas'.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("verifying whether the pod from the unreachable node is recreated")
|
||||
err = framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
|
||||
// sleep a bit, to allow Watch in NodeController to catch up.
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
By("verify whether new pods can be created on the re-attached node")
|
||||
// increasing the RC size is not a valid way to test this
|
||||
// since we have no guarantees the pod will be scheduled on our node.
|
||||
additionalPod := "additionalpod"
|
||||
err = newPodOnNode(c, ns, additionalPod, node.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.VerifyPods(c, ns, additionalPod, true, 1)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// verify that it is really on the requested node
|
||||
{
|
||||
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if pod.Spec.NodeName != node.Name {
|
||||
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() {
|
||||
// Create a replication controller for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
|
||||
name := "my-hostname-net"
|
||||
gracePeriod := int64(30)
|
||||
|
||||
common.NewSVCByName(c, ns, name)
|
||||
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
|
||||
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
|
||||
err := framework.VerifyPods(c, ns, name, true, replicas)
|
||||
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that 'podNameToDisappear',
|
||||
// which belongs to replication controller 'rcName', did not disappear (because its
|
||||
// grace period is set to 30).
|
||||
// Finally, it checks that the replication controller recreates the
|
||||
// pods on another node and that the number of replicas is now equal to 'replicas + 1'.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
|
||||
_, err = framework.PodsCreated(c, ns, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[StatefulSet]", func() {
|
||||
psName := "ss"
|
||||
labels := map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
headlessSvcName := "test"
|
||||
|
||||
BeforeEach(func() {
|
||||
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
|
||||
framework.SkipUnlessProviderIs("gke")
|
||||
By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
|
||||
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
|
||||
framework.ExpectNoError(err)
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all stateful set in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
It("should come back up if node goes down [Slow] [Disruptive]", func() {
|
||||
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
|
||||
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
|
||||
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
|
||||
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pst := framework.NewStatefulSetTester(c)
|
||||
|
||||
nn := framework.TestContext.CloudConfig.NumNodes
|
||||
nodeNames, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn)
|
||||
framework.ExpectNoError(err)
|
||||
common.RestartNodes(f.ClientSet, nodeNames)
|
||||
|
||||
By("waiting for pods to be running again")
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
})
|
||||
|
||||
It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
|
||||
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
|
||||
_, err := c.AppsV1beta1().StatefulSets(ns).Create(ps)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pst := framework.NewStatefulSetTester(c)
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
|
||||
pod := pst.GetPodList(ps).Items[0]
|
||||
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
|
||||
// which belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver.
|
||||
// The grace period on the stateful pods is set to a value > 0.
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
|
||||
err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
|
||||
By("waiting for pods to be running again")
|
||||
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("[Job]", func() {
|
||||
It("should create new pods when node is partitioned", func() {
|
||||
parallelism := int32(2)
|
||||
completions := int32(4)
|
||||
backoffLimit := int32(6) // default value
|
||||
|
||||
job := framework.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
|
||||
parallelism, completions, nil, backoffLimit)
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{framework.JobSelectorKey: job.Name}))
|
||||
|
||||
By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("choose a node with at least one pod - we will block some network traffic on this node")
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
nodeName := pods.Items[0].Spec.NodeName
|
||||
|
||||
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// This creates a temporary network partition, verifies that the job has 'parallelism' number of
|
||||
// running pods after the node-controller detects node unreachable.
|
||||
By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
|
||||
Expect(err).To(Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
|
||||
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Pods", func() {
|
||||
Context("should be evicted from unready Node", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessNodeCountIsAtLeast(2)
|
||||
})
|
||||
|
||||
// What happens in this test:
|
||||
// Network traffic from a node to master is cut off to simulate network partition
|
||||
// Expect to observe:
|
||||
// 1. Node is marked NotReady after timeout by nodecontroller (40 seconds)
|
||||
// 2. All pods on node are marked NotReady shortly after #1
|
||||
// 3. After enough time passes, all Pods are evicted from the given Node
|
||||
It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
|
||||
"AND all pods should be evicted after eviction timeout passes", func() {
|
||||
By("choose a node - we will block all network traffic on this node")
|
||||
var podOpts metav1.ListOptions
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(c)
|
||||
framework.FilterNodes(nodes, func(node v1.Node) bool {
|
||||
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
|
||||
return false
|
||||
}
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
if err != nil || len(pods.Items) <= 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if len(nodes.Items) <= 0 {
|
||||
framework.Failf("No eligible node were found: %d", len(nodes.Items))
|
||||
}
|
||||
node := nodes.Items[0]
|
||||
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
|
||||
if err := framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
|
||||
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
|
||||
}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
|
||||
framework.ExpectNoError(err)
|
||||
podTolerationTimes := map[string]time.Duration{}
|
||||
// This test doesn't add tolerations by itself, but because they may be present in the cluster
|
||||
// it needs to account for that.
|
||||
for _, pod := range pods.Items {
|
||||
namespacedName := fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
|
||||
tolerations := pod.Spec.Tolerations
|
||||
framework.ExpectNoError(err)
|
||||
for _, toleration := range tolerations {
|
||||
if toleration.ToleratesTaint(nodepkg.UnreachableTaintTemplate) {
|
||||
if toleration.TolerationSeconds != nil {
|
||||
podTolerationTimes[namespacedName] = time.Duration(*toleration.TolerationSeconds) * time.Second
|
||||
break
|
||||
} else {
|
||||
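// A toleration without TolerationSeconds tolerates the unreachable taint indefinitely; use -1 as a sentinel so the pod is treated as never evicted.
|
||||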
podTolerationTimes[namespacedName] = -1
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := podTolerationTimes[namespacedName]; !ok {
|
||||
podTolerationTimes[namespacedName] = 0
|
||||
}
|
||||
}
|
||||
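// Split pods into those that tolerate the unreachable taint forever (never evicted) and the rest, and record the longest finite toleration, which bounds how long eviction may take.
|
||||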
neverEvictedPods := []string{}
|
||||
maxTolerationTime := time.Duration(0)
|
||||
for podName, tolerationTime := range podTolerationTimes {
|
||||
if tolerationTime < 0 {
|
||||
neverEvictedPods = append(neverEvictedPods, podName)
|
||||
} else {
|
||||
if tolerationTime > maxTolerationTime {
|
||||
maxTolerationTime = tolerationTime
|
||||
}
|
||||
}
|
||||
}
|
||||
framework.Logf(
|
||||
"Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v",
|
||||
neverEvictedPods,
|
||||
maxTolerationTime,
|
||||
)
|
||||
|
||||
By("Set up watch on node status")
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
|
||||
stopCh := make(chan struct{})
|
||||
newNode := make(chan *v1.Node)
|
||||
var controller cache.Controller
|
||||
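// Watch the chosen node so readiness transitions can be observed on the newNode channel by expectNodeReadiness.
|
||||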
_, controller = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
obj, err := f.ClientSet.CoreV1().Nodes().List(options)
|
||||
return runtime.Object(obj), err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
return f.ClientSet.CoreV1().Nodes().Watch(options)
|
||||
},
|
||||
},
|
||||
&v1.Node{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
newNode <- n
|
||||
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
defer func() {
|
||||
// Will not explicitly close newNode channel here due to
|
||||
// race condition where stopCh and newNode are closed but informer onUpdate still executes.
|
||||
close(stopCh)
|
||||
}()
|
||||
go controller.Run(stopCh)
|
||||
|
||||
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
|
||||
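// Resolve the node's external IP and the master address; traffic between them is blocked to simulate the partition.
|
||||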
host := framework.GetNodeExternalIP(&node)
|
||||
master := framework.GetMasterAddress(c)
|
||||
defer func() {
|
||||
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
|
||||
framework.UnblockNetwork(host, master)
|
||||
|
||||
if CurrentGinkgoTestDescription().Failed {
|
||||
return
|
||||
}
|
||||
|
||||
By("Expect to observe node status change from NotReady to Ready after network connectivity recovers")
|
||||
expectNodeReadiness(true, newNode)
|
||||
}()
|
||||
|
||||
framework.BlockNetwork(host, master)
|
||||
|
||||
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
||||
expectNodeReadiness(false, newNode)
|
||||
framework.ExpectNoError(wait.Poll(1*time.Second, timeout, func() (bool, error) {
|
||||
return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
|
||||
}))
|
||||
if err = framework.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
|
||||
framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
|
||||
}
|
||||
|
||||
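// Wait out the longest finite toleration plus a small buffer so every pod that should be evicted has had time to be deleted.
|
||||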
sleepTime := maxTolerationTime + 20*time.Second
|
||||
By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
|
||||
time.Sleep(sleepTime)
|
||||
pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts)
|
||||
framework.ExpectNoError(err)
|
||||
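// Any pod without a deletion timestamp is still considered running; only pods in the never-evicted set are allowed to remain.
|
||||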
seenRunning := []string{}
|
||||
for _, pod := range pods.Items {
|
||||
namespacedName := fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
|
||||
shouldBeTerminating := true
|
||||
for _, neverEvictedPod := range neverEvictedPods {
|
||||
if neverEvictedPod == namespacedName {
|
||||
shouldBeTerminating = false
|
||||
}
|
||||
}
|
||||
if pod.DeletionTimestamp == nil {
|
||||
seenRunning = append(seenRunning, namespacedName)
|
||||
if shouldBeTerminating {
|
||||
framework.Failf("Pod %v should have been deleted but was seen running", namespacedName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, neverEvictedPod := range neverEvictedPods {
|
||||
running := false
|
||||
for _, runningPod := range seenRunning {
|
||||
if runningPod == neverEvictedPod {
|
||||
running = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !running {
|
||||
framework.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
328
vendor/k8s.io/kubernetes/test/e2e/apps/rc.go
generated
vendored
Normal file
@ -0,0 +1,328 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/controller/replication"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("ReplicationController", func() {
|
||||
f := framework.NewDefaultFramework("replication-controller")
|
||||
|
||||
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
|
||||
TestReplicationControllerServeImageOrFail(f, "basic", framework.ServeHostnameImage)
|
||||
})
|
||||
|
||||
It("should serve a basic image on each replica with a private image", func() {
|
||||
// requires private images
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
privateimage := imageutils.ServeHostname
|
||||
privateimage.SetRegistry(imageutils.PrivateRegistry)
|
||||
TestReplicationControllerServeImageOrFail(f, "private", imageutils.GetE2EImage(privateimage))
|
||||
})
|
||||
|
||||
It("should surface a failure condition on a common issue like exceeded quota", func() {
|
||||
testReplicationControllerConditionCheck(f)
|
||||
})
|
||||
|
||||
It("should adopt matching pods on creation", func() {
|
||||
testRCAdoptMatchingOrphans(f)
|
||||
})
|
||||
|
||||
It("should release no longer matching pods", func() {
|
||||
testRCReleaseControlledNotMatching(f)
|
||||
})
|
||||
})
|
||||
|
||||
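// newRC returns a ReplicationController with the given replica count and pod labels, running a single container with the provided image and a zero termination grace period so test pods terminate immediately.
|
||||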
func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *v1.ReplicationController {
|
||||
zero := int64(0)
|
||||
return &v1.ReplicationController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: rsName,
|
||||
},
|
||||
Spec: v1.ReplicationControllerSpec{
|
||||
Replicas: func(i int32) *int32 { return &i }(replicas),
|
||||
Template: &v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: rcPodLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: imageName,
|
||||
Image: image,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// A basic test to check the deployment of an image using
|
||||
// a replication controller. The image serves its hostname
|
||||
// which is checked for each replica.
|
||||
func TestReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
|
||||
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
|
||||
replicas := int32(1)
|
||||
|
||||
// Create a replication controller for a service
|
||||
// that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is
|
||||
// in contrib/for-demos/serve_hostname
|
||||
By(fmt.Sprintf("Creating replication controller %s", name))
|
||||
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
|
||||
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check that pods for the new RC were created.
|
||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
// are running so non-running pods cause a timeout for this test.
|
||||
framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
|
||||
running := int32(0)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
err = f.WaitForPodRunning(pod.Name)
|
||||
if err != nil {
|
||||
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
|
||||
if getErr == nil {
|
||||
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
|
||||
} else {
|
||||
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
|
||||
}
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
running++
|
||||
}
|
||||
|
||||
// Sanity check
|
||||
if running != replicas {
|
||||
Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
// Verify that something is listening.
|
||||
framework.Logf("Trying to dial the pod")
|
||||
retryTimeout := 2 * time.Minute
|
||||
retryInterval := 5 * time.Second
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
|
||||
if err != nil {
|
||||
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Create a quota restricting pods in the current namespace to 2.
|
||||
// 2. Create a replication controller that wants to run 3 pods.
|
||||
// 3. Check replication controller conditions for a ReplicaFailure condition.
|
||||
// 4. Relax quota or scale down the controller and observe the condition is gone.
|
||||
func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
c := f.ClientSet
|
||||
namespace := f.Namespace.Name
|
||||
name := "condition-test"
|
||||
|
||||
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
podQuota := quota.Status.Hard[v1.ResourcePods]
|
||||
quantity := resource.MustParse("2")
|
||||
return (&podQuota).Cmp(quantity) == 0, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("resource quota %q never synced", name)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
|
||||
rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
|
||||
generation := rc.Generation
|
||||
conditions := rc.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if generation > rc.Status.ObservedGeneration {
|
||||
return false, nil
|
||||
}
|
||||
conditions = rc.Status.Conditions
|
||||
|
||||
cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
|
||||
return cond != nil, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
|
||||
rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
|
||||
x := int32(2)
|
||||
update.Spec.Replicas = &x
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
|
||||
generation = rc.Generation
|
||||
conditions = rc.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if generation > rc.Status.ObservedGeneration {
|
||||
return false, nil
|
||||
}
|
||||
conditions = rc.Status.Conditions
|
||||
|
||||
cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
|
||||
return cond == nil, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func testRCAdoptMatchingOrphans(f *framework.Framework) {
|
||||
name := "pod-adoption"
|
||||
By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
p := f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: NginxImageName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
By("When a replication controller with a matching selector is created")
|
||||
replicas := int32(1)
|
||||
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
|
||||
rcSt.Spec.Selector = map[string]string{"name": name}
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Then the orphan pod is adopted")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
// The Pod p should either be adopted or deleted by the RC
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rc.UID {
|
||||
// pod adopted
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
// pod still not adopted
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func testRCReleaseControlledNotMatching(f *framework.Framework) {
|
||||
name := "pod-release"
|
||||
By("Given a ReplicationController is created")
|
||||
replicas := int32(1)
|
||||
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImageName)
|
||||
rcSt.Spec.Selector = map[string]string{"name": name}
|
||||
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("When the matched label of one of its pods change")
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
p := pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
|
||||
if err != nil && errors.IsConflict(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Then the pod is released")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rc.UID {
|
||||
// pod still belonging to the replication controller
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// pod already released
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
327
vendor/k8s.io/kubernetes/test/e2e/apps/replica_set.go
generated
vendored
Normal file
@ -0,0 +1,327 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/controller/replicaset"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
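// newRS returns a ReplicaSet analogous to newRC: the given replica count and pod labels, a single container with the provided image, and a zero termination grace period.
|
||||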
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
|
||||
zero := int64(0)
|
||||
return &extensions.ReplicaSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: rsName,
|
||||
},
|
||||
Spec: extensions.ReplicaSetSpec{
|
||||
Replicas: &replicas,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: rsPodLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: imageName,
|
||||
Image: image,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
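// newPodQuota returns a ResourceQuota that caps the number of pods in the namespace at the given number.
|
||||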
func newPodQuota(name, number string) *v1.ResourceQuota {
|
||||
return &v1.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ResourceQuotaSpec{
|
||||
Hard: v1.ResourceList{
|
||||
v1.ResourcePods: resource.MustParse(number),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
var _ = SIGDescribe("ReplicaSet", func() {
|
||||
f := framework.NewDefaultFramework("replicaset")
|
||||
|
||||
framework.ConformanceIt("should serve a basic image on each replica with a public image ", func() {
|
||||
testReplicaSetServeImageOrFail(f, "basic", framework.ServeHostnameImage)
|
||||
})
|
||||
|
||||
It("should serve a basic image on each replica with a private image", func() {
|
||||
// requires private images
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
privateimage := imageutils.ServeHostname
|
||||
privateimage.SetRegistry(imageutils.PrivateRegistry)
|
||||
testReplicaSetServeImageOrFail(f, "private", imageutils.GetE2EImage(privateimage))
|
||||
})
|
||||
|
||||
It("should surface a failure condition on a common issue like exceeded quota", func() {
|
||||
testReplicaSetConditionCheck(f)
|
||||
})
|
||||
|
||||
It("should adopt matching pods on creation and release no longer matching pods", func() {
|
||||
testRSAdoptMatchingAndReleaseNotMatching(f)
|
||||
})
|
||||
})
|
||||
|
||||
// A basic test to check the deployment of an image using a ReplicaSet. The
|
||||
// image serves its hostname which is checked for each replica.
|
||||
func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
|
||||
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
|
||||
replicas := int32(1)
|
||||
|
||||
// Create a ReplicaSet for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is
|
||||
// in contrib/for-demos/serve_hostname
|
||||
framework.Logf("Creating ReplicaSet %s", name)
|
||||
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
|
||||
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(newRS)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Check that pods for the new RS were created.
|
||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
// are running so non-running pods cause a timeout for this test.
|
||||
framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
|
||||
running := int32(0)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
continue
|
||||
}
|
||||
err = f.WaitForPodRunning(pod.Name)
|
||||
if err != nil {
|
||||
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
|
||||
if getErr == nil {
|
||||
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
|
||||
} else {
|
||||
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
|
||||
}
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
running++
|
||||
}
|
||||
|
||||
// Sanity check
|
||||
if running != replicas {
|
||||
Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
// Verify that something is listening.
|
||||
framework.Logf("Trying to dial the pod")
|
||||
retryTimeout := 2 * time.Minute
|
||||
retryInterval := 5 * time.Second
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
|
||||
if err != nil {
|
||||
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
|
||||
}
|
||||
}
|
||||
|
||||
// 1. Create a quota restricting pods in the current namespace to 2.
|
||||
// 2. Create a replica set that wants to run 3 pods.
|
||||
// 3. Check replica set conditions for a ReplicaFailure condition.
|
||||
// 4. Scale down the replica set and observe the condition is gone.
|
||||
func testReplicaSetConditionCheck(f *framework.Framework) {
|
||||
c := f.ClientSet
|
||||
namespace := f.Namespace.Name
|
||||
name := "condition-test"
|
||||
|
||||
By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
quantity := resource.MustParse("2")
|
||||
podQuota := quota.Status.Hard[v1.ResourcePods]
|
||||
return (&podQuota).Cmp(quantity) == 0, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("resource quota %q never synced", name)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
|
||||
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
|
||||
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Create(rs)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
|
||||
generation := rs.Generation
|
||||
conditions := rs.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if generation > rs.Status.ObservedGeneration {
|
||||
return false, nil
|
||||
}
|
||||
conditions = rs.Status.Conditions
|
||||
|
||||
cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
|
||||
return cond != nil, nil
|
||||
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
|
||||
rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
|
||||
x := int32(2)
|
||||
update.Spec.Replicas = &x
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
|
||||
generation = rs.Generation
|
||||
conditions = rs.Status.Conditions
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
rs, err = c.ExtensionsV1beta1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if generation > rs.Status.ObservedGeneration {
|
||||
return false, nil
|
||||
}
|
||||
conditions = rs.Status.Conditions
|
||||
|
||||
cond := replicaset.GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
|
||||
return cond == nil, nil
|
||||
})
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions)
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
|
||||
name := "pod-adoption-release"
|
||||
By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
|
||||
p := f.PodClient().CreateSync(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: NginxImageName,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
By("When a replicaset with a matching selector is created")
|
||||
replicas := int32(1)
|
||||
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImageName)
|
||||
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
|
||||
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).Create(rsSt)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Then the orphan pod is adopted")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
// The Pod p should either be adopted or deleted by the ReplicaSet
|
||||
if errors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rs.UID {
|
||||
// pod adopted
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
// pod still not adopted
|
||||
return false, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("When the matched label of one of its pods change")
|
||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
p = &pods.Items[0]
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pod.Labels = map[string]string{"name": "not-matching-name"}
|
||||
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
|
||||
if err != nil && errors.IsConflict(err) {
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Then the pod is released")
|
||||
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
|
||||
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, owner := range p2.OwnerReferences {
|
||||
if *owner.Controller && owner.UID == rs.UID {
|
||||
// pod still belonging to the replicaset
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// pod already released
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
1136
vendor/k8s.io/kubernetes/test/e2e/apps/statefulset.go
generated
vendored
Normal file
File diff suppressed because it is too large
37
vendor/k8s.io/kubernetes/test/e2e/apps/types.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apps
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
NginxImageName = "nginx"
|
||||
RedisImageName = "redis"
|
||||
)
|
||||
|
||||
var (
|
||||
CronJobGroupVersionResourceAlpha = schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}
|
||||
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
|
||||
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
|
||||
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
|
||||
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
|
||||
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
|
||||
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
|
||||
)
|
65
vendor/k8s.io/kubernetes/test/e2e/auth/BUILD
generated
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"audit.go",
|
||||
"certificates.go",
|
||||
"framework.go",
|
||||
"metadata_concealment.go",
|
||||
"node_authz.go",
|
||||
"pod_security_policy.go",
|
||||
"service_accounts.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/auth",
|
||||
deps = [
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//plugin/pkg/admission/serviceaccount:go_default_library",
|
||||
"//test/e2e/common:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/evanphx/json-patch:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/cert:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
14
vendor/k8s.io/kubernetes/test/e2e/auth/OWNERS
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
reviewers:
|
||||
- liggitt
|
||||
- mikedanese
|
||||
- smarterclayton
|
||||
- sttts
|
||||
- tallclair
|
||||
- ericchiang
|
||||
approvers:
|
||||
- liggitt
|
||||
- mikedanese
|
||||
- smarterclayton
|
||||
- sttts
|
||||
- tallclair
|
||||
- ericchiang
|
706
vendor/k8s.io/kubernetes/test/e2e/auth/audit.go
generated
vendored
Normal file
@ -0,0 +1,706 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
"k8s.io/apiextensions-apiserver/test/integration/testserver"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apiserver/pkg/apis/audit/v1beta1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/evanphx/json-patch"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var (
|
||||
watchTestTimeout int64 = 1
|
||||
auditTestUser = "kubecfg"
|
||||
|
||||
crd = testserver.NewRandomNameCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
|
||||
crdName = strings.SplitN(crd.Name, ".", 2)[0]
|
||||
crdNamespace = strings.SplitN(crd.Name, ".", 2)[1]
|
||||
|
||||
watchOptions = metav1.ListOptions{TimeoutSeconds: &watchTestTimeout}
|
||||
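// An empty JSON patch is enough to exercise the patch verb for the audit events below.
|
||||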
patch, _ = json.Marshal(jsonpatch.Patch{})
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Advanced Audit", func() {
|
||||
f := framework.NewDefaultFramework("audit")
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
})
|
||||
|
||||
// TODO: Get rid of [DisabledForLargeClusters] when feature request #53455 is ready.
|
||||
It("should audit API calls [DisabledForLargeClusters]", func() {
|
||||
namespace := f.Namespace.Name
|
||||
|
||||
config, err := framework.LoadConfig()
|
||||
framework.ExpectNoError(err, "failed to load config")
|
||||
apiExtensionClient, err := clientset.NewForConfig(config)
|
||||
framework.ExpectNoError(err, "failed to initialize apiExtensionClient")
|
||||
|
||||
testCases := []struct {
|
||||
action func()
|
||||
events []auditEvent
|
||||
}{
|
||||
// Create, get, update, patch, delete, list, watch pods.
|
||||
{
|
||||
func() {
|
||||
pod := &apiv1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-pod",
|
||||
},
|
||||
Spec: apiv1.PodSpec{
|
||||
Containers: []apiv1.Container{{
|
||||
Name: "pause",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
}},
|
||||
},
|
||||
}
|
||||
updatePod := func(pod *apiv1.Pod) {}
|
||||
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
_, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to get audit-pod")
|
||||
|
||||
podChan, err := f.PodClient().Watch(watchOptions)
|
||||
framework.ExpectNoError(err, "failed to create watch for pods")
|
||||
for range podChan.ResultChan() {
|
||||
}
|
||||
|
||||
f.PodClient().Update(pod.Name, updatePod)
|
||||
|
||||
_, err = f.PodClient().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "failed to list pods")
|
||||
|
||||
_, err = f.PodClient().Patch(pod.Name, types.JSONPatchType, patch)
|
||||
framework.ExpectNoError(err, "failed to patch pod")
|
||||
|
||||
f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
|
||||
},
|
||||
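// Expected audit events; each entry lists, in order, the audit level, stage, request URI, verb, HTTP status code, user, resource, namespace, and whether the request and response objects are recorded.
|
||||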
[]auditEvent{
|
||||
{
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace),
|
||||
"create",
|
||||
201,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods/audit-pod", namespace),
|
||||
"get",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace),
|
||||
"list",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseStarted,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods/audit-pod", namespace),
|
||||
"update",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods/audit-pod", namespace),
|
||||
"patch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/pods/audit-pod", namespace),
|
||||
"delete",
|
||||
200,
|
||||
auditTestUser,
|
||||
"pods",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
},
|
||||
},
|
||||
},
|
||||
// Create, get, update, patch, delete, list, watch deployments.
|
||||
{
|
||||
func() {
|
||||
podLabels := map[string]string{"name": "audit-deployment-pod"}
|
||||
d := framework.NewDeployment("audit-deployment", int32(1), podLabels, "redis", imageutils.GetE2EImage(imageutils.Redis), extensions.RecreateDeploymentStrategyType)
|
||||
|
||||
_, err := f.ClientSet.Extensions().Deployments(namespace).Create(d)
|
||||
framework.ExpectNoError(err, "failed to create audit-deployment")
|
||||
|
||||
_, err = f.ClientSet.Extensions().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to get audit-deployment")
|
||||
|
||||
deploymentChan, err := f.ClientSet.Extensions().Deployments(namespace).Watch(watchOptions)
|
||||
framework.ExpectNoError(err, "failed to create watch for deployments")
|
||||
for range deploymentChan.ResultChan() {
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.Extensions().Deployments(namespace).Update(d)
|
||||
framework.ExpectNoError(err, "failed to update audit-deployment")
|
||||
|
||||
_, err = f.ClientSet.Extensions().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
|
||||
framework.ExpectNoError(err, "failed to patch deployment")
|
||||
|
||||
_, err = f.ClientSet.Extensions().Deployments(namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "failed to create list deployments")
|
||||
|
||||
err = f.ClientSet.Extensions().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "failed to delete deployments")
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace),
|
||||
"create",
|
||||
201,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"get",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments", namespace),
|
||||
"list",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseStarted,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequest,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"update",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"patch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
}, {
|
||||
v1beta1.LevelRequestResponse,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/apis/extensions/v1beta1/namespaces/%s/deployments/audit-deployment", namespace),
|
||||
"delete",
|
||||
200,
|
||||
auditTestUser,
|
||||
"deployments",
|
||||
namespace,
|
||||
true,
|
||||
true,
|
||||
},
|
||||
},
|
||||
},
|
||||
// Create, get, update, patch, delete, list, watch configmaps.
|
||||
{
|
||||
func() {
|
||||
configMap := &apiv1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-configmap",
|
||||
},
|
||||
Data: map[string]string{
|
||||
"map-key": "map-value",
|
||||
},
|
||||
}
|
||||
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(configMap)
|
||||
framework.ExpectNoError(err, "failed to create audit-configmap")
|
||||
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to get audit-configmap")
|
||||
|
||||
configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(watchOptions)
|
||||
framework.ExpectNoError(err, "failed to create watch for config maps")
|
||||
for range configMapChan.ResultChan() {
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(configMap)
|
||||
framework.ExpectNoError(err, "failed to update audit-configmap")
|
||||
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch)
|
||||
framework.ExpectNoError(err, "failed to patch configmap")
|
||||
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "failed to list config maps")
|
||||
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "failed to delete audit-configmap")
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps", namespace),
|
||||
"create",
|
||||
201,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
"get",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps", namespace),
|
||||
"list",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseStarted,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
"update",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
"patch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/configmaps/audit-configmap", namespace),
|
||||
"delete",
|
||||
200,
|
||||
auditTestUser,
|
||||
"configmaps",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
},
|
||||
},
|
||||
},
|
||||
// Create, get, update, patch, delete, list, watch secrets.
|
||||
{
|
||||
func() {
|
||||
secret := &apiv1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "audit-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"top-secret": []byte("foo-bar"),
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().Secrets(namespace).Create(secret)
|
||||
framework.ExpectNoError(err, "failed to create audit-secret")
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to get audit-secret")
|
||||
|
||||
secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(watchOptions)
|
||||
framework.ExpectNoError(err, "failed to create watch for secrets")
|
||||
for range secretChan.ResultChan() {
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Secrets(namespace).Update(secret)
|
||||
framework.ExpectNoError(err, "failed to update audit-secret")
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch)
|
||||
framework.ExpectNoError(err, "failed to patch secret")
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err, "failed to list secrets")
|
||||
|
||||
err = f.ClientSet.CoreV1().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "failed to delete audit-secret")
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets", namespace),
|
||||
"create",
|
||||
201,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets/audit-secret", namespace),
|
||||
"get",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets", namespace),
|
||||
"list",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseStarted,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets?timeoutSeconds=%d&watch=true", namespace, watchTestTimeout),
|
||||
"watch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets/audit-secret", namespace),
|
||||
"update",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets/audit-secret", namespace),
|
||||
"patch",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
}, {
|
||||
v1beta1.LevelMetadata,
|
||||
v1beta1.StageResponseComplete,
|
||||
fmt.Sprintf("/api/v1/namespaces/%s/secrets/audit-secret", namespace),
|
||||
"delete",
|
||||
200,
|
||||
auditTestUser,
|
||||
"secrets",
|
||||
namespace,
|
||||
false,
|
||||
false,
|
||||
},
|
||||
},
|
||||
},
|
||||
// Create and delete custom resource definition.
|
||||
{
|
||||
func() {
|
||||
_, err = testserver.CreateNewCustomResourceDefinition(crd, apiExtensionClient, f.ClientPool)
|
||||
framework.ExpectNoError(err, "failed to create custom resource definition")
|
||||
testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
|
||||
},
|
||||
[]auditEvent{
|
||||
{
|
||||
level: v1beta1.LevelRequestResponse,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: "/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions",
|
||||
verb: "create",
|
||||
code: 201,
|
||||
user: auditTestUser,
|
||||
resource: "customresourcedefinitions",
|
||||
requestObject: true,
|
||||
responseObject: true,
|
||||
}, {
|
||||
level: v1beta1.LevelMetadata,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/%s/v1beta1/%s", crdNamespace, crdName),
|
||||
verb: "create",
|
||||
code: 201,
|
||||
user: auditTestUser,
|
||||
resource: crdName,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
}, {
|
||||
level: v1beta1.LevelRequestResponse,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/apiextensions.k8s.io/v1beta1/customresourcedefinitions/%s", crd.Name),
|
||||
verb: "delete",
|
||||
code: 200,
|
||||
user: auditTestUser,
|
||||
resource: "customresourcedefinitions",
|
||||
requestObject: false,
|
||||
responseObject: true,
|
||||
}, {
|
||||
level: v1beta1.LevelMetadata,
|
||||
stage: v1beta1.StageResponseComplete,
|
||||
requestURI: fmt.Sprintf("/apis/%s/v1beta1/%s/setup-instance", crdNamespace, crdName),
|
||||
verb: "delete",
|
||||
code: 200,
|
||||
user: auditTestUser,
|
||||
resource: crdName,
|
||||
requestObject: false,
|
||||
responseObject: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
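// Run each test case's action, accumulate the audit events it is expected
// to produce, then verify every expected event appears in the audit log.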
expectedEvents := []auditEvent{}
|
||||
for _, t := range testCases {
|
||||
t.action()
|
||||
expectedEvents = append(expectedEvents, t.events...)
|
||||
}
|
||||
|
||||
expectAuditLines(f, expectedEvents)
|
||||
})
|
||||
})
|
||||
|
||||
type auditEvent struct {
|
||||
level v1beta1.Level
|
||||
stage v1beta1.Stage
|
||||
requestURI string
|
||||
verb string
|
||||
code int32
|
||||
user string
|
||||
resource string
|
||||
namespace string
|
||||
requestObject bool
|
||||
responseObject bool
|
||||
}
|
||||
|
||||
// Search the audit log for the expected audit lines.
|
||||
func expectAuditLines(f *framework.Framework, expected []auditEvent) {
|
||||
expectations := map[auditEvent]bool{}
|
||||
for _, event := range expected {
|
||||
expectations[event] = false
|
||||
}
|
||||
|
||||
// Fetch the log stream.
|
||||
stream, err := f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/logs/kube-apiserver-audit.log").Stream()
|
||||
framework.ExpectNoError(err, "could not read audit log")
|
||||
defer stream.Close()
|
||||
|
||||
scanner := bufio.NewScanner(stream)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
event, err := parseAuditLine(line)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// If the event was expected, mark it as found.
|
||||
if _, found := expectations[event]; found {
|
||||
expectations[event] = true
|
||||
}
|
||||
}
|
||||
framework.ExpectNoError(scanner.Err(), "error reading audit log")
|
||||
|
||||
for event, found := range expectations {
|
||||
Expect(found).To(BeTrue(), "Event %#v not found!", event)
|
||||
}
|
||||
}
|
||||
|
||||
func parseAuditLine(line string) (auditEvent, error) {
|
||||
var e v1beta1.Event
|
||||
if err := json.Unmarshal([]byte(line), &e); err != nil {
|
||||
return auditEvent{}, err
|
||||
}
|
||||
event := auditEvent{
|
||||
level: e.Level,
|
||||
stage: e.Stage,
|
||||
requestURI: e.RequestURI,
|
||||
verb: e.Verb,
|
||||
user: e.User.Username,
|
||||
}
|
||||
if e.ObjectRef != nil {
|
||||
event.namespace = e.ObjectRef.Namespace
|
||||
event.resource = e.ObjectRef.Resource
|
||||
}
|
||||
if e.ResponseStatus != nil {
|
||||
event.code = e.ResponseStatus.Code
|
||||
}
|
||||
if e.ResponseObject != nil {
|
||||
event.responseObject = true
|
||||
}
|
||||
if e.RequestObject != nil {
|
||||
event.requestObject = true
|
||||
}
|
||||
return event, nil
|
||||
}
|
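// For reference, each audit log line is a JSON-encoded audit.k8s.io/v1beta1
// Event. An illustrative sample (values are made up, not taken from a real
// run) that parseAuditLine would accept looks roughly like:
//
//	{"kind":"Event","apiVersion":"audit.k8s.io/v1beta1","level":"Metadata",
//	 "stage":"ResponseComplete","requestURI":"/api/v1/namespaces/default/configmaps",
//	 "verb":"create","user":{"username":"kubecfg"},
//	 "objectRef":{"resource":"configmaps","namespace":"default"},
//	 "responseStatus":{"code":201}}
//
// which maps to an auditEvent with level Metadata, verb "create", code 201,
// resource "configmaps", and namespace "default".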
121
vendor/k8s.io/kubernetes/test/e2e/auth/certificates.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/certificates/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
v1beta1client "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
|
||||
"k8s.io/client-go/util/cert"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Certificates API", func() {
|
||||
f := framework.NewDefaultFramework("certificates")
|
||||
|
||||
It("should support building a client with a CSR", func() {
|
||||
const commonName = "tester-csr"
|
||||
|
||||
pk, err := cert.NewPrivateKey()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
pkder := x509.MarshalPKCS1PrivateKey(pk)
|
||||
pkpem := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: pkder,
|
||||
})
|
||||
|
||||
csrb, err := cert.MakeCSR(pk, &pkix.Name{CommonName: commonName, Organization: []string{"system:masters"}}, nil, nil)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
csr := &v1beta1.CertificateSigningRequest{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: commonName + "-",
|
||||
},
|
||||
Spec: v1beta1.CertificateSigningRequestSpec{
|
||||
Request: csrb,
|
||||
Usages: []v1beta1.KeyUsage{
|
||||
v1beta1.UsageSigning,
|
||||
v1beta1.UsageKeyEncipherment,
|
||||
v1beta1.UsageClientAuth,
|
||||
},
|
||||
},
|
||||
}
|
||||
csrs := f.ClientSet.CertificatesV1beta1().CertificateSigningRequests()
|
||||
|
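// The remainder of the test exercises the full CSR flow: create the CSR,
// approve it, wait for the signer to issue a certificate, then build a
// client authenticated with the issued certificate and the private key.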
||||
framework.Logf("creating CSR")
|
||||
csr, err = csrs.Create(csr)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
csrName := csr.Name
|
||||
|
||||
framework.Logf("approving CSR")
|
||||
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
|
||||
csr.Status.Conditions = []v1beta1.CertificateSigningRequestCondition{
|
||||
{
|
||||
Type: v1beta1.CertificateApproved,
|
||||
Reason: "E2E",
|
||||
Message: "Set from an e2e test",
|
||||
},
|
||||
}
|
||||
csr, err = csrs.UpdateApproval(csr)
|
||||
if err != nil {
|
||||
csr, _ = csrs.Get(csrName, metav1.GetOptions{})
|
||||
framework.Logf("err updating approval: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}))
|
||||
|
||||
framework.Logf("waiting for CSR to be signed")
|
||||
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
|
||||
csr, err = csrs.Get(csrName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(csr.Status.Certificate) == 0 {
|
||||
framework.Logf("csr not signed yet")
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}))
|
||||
|
||||
framework.Logf("testing the client")
|
||||
rcfg, err := framework.LoadConfig()
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
rcfg.TLSClientConfig.CertData = csr.Status.Certificate
|
||||
rcfg.TLSClientConfig.KeyData = pkpem
|
||||
rcfg.TLSClientConfig.CertFile = ""
|
||||
rcfg.BearerToken = ""
|
||||
rcfg.AuthProvider = nil
|
||||
rcfg.Username = ""
|
||||
rcfg.Password = ""
|
||||
|
||||
newClient, err := v1beta1client.NewForConfig(rcfg)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(csrName, nil))
|
||||
})
|
||||
})
|
23
vendor/k8s.io/kubernetes/test/e2e/auth/framework.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import "github.com/onsi/ginkgo"
|
||||
|
||||
func SIGDescribe(text string, body func()) bool {
|
||||
return ginkgo.Describe("[sig-auth] "+text, body)
|
||||
}
|
63
vendor/k8s.io/kubernetes/test/e2e/auth/metadata_concealment.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
batch "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Metadata Concealment", func() {
|
||||
f := framework.NewDefaultFramework("metadata-concealment")
|
||||
|
||||
It("should run a check-metadata-concealment job to completion", func() {
|
||||
framework.SkipUnlessProviderIs("gce")
|
||||
By("Creating a job")
|
||||
job := &batch.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "check-metadata-concealment",
|
||||
},
|
||||
Spec: batch.JobSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "check-metadata-concealment",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "check-metadata-concealment",
|
||||
Image: "gcr.io/google_containers/check-metadata-concealment:v0.0.2",
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyOnFailure,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Ensuring job reaches completions")
|
||||
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
159
vendor/k8s.io/kubernetes/test/e2e/auth/node_authz.go
generated
vendored
Normal file
@ -0,0 +1,159 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
NodesGroup = "system:nodes"
|
||||
NodeNamePrefix = "system:node:"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
|
||||
|
||||
f := framework.NewDefaultFramework("node-authz")
|
||||
// client that will impersonate a node
|
||||
var c clientset.Interface
|
||||
var ns string
|
||||
var asUser string
|
||||
var defaultSaSecret string
|
||||
var nodeName string
|
||||
BeforeEach(func() {
|
||||
ns = f.Namespace.Name
|
||||
|
||||
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(nodeList.Items)).NotTo(Equal(0))
|
||||
nodeName = nodeList.Items[0].Name
|
||||
asUser = NodeNamePrefix + nodeName
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(sa.Secrets)).NotTo(Equal(0))
|
||||
defaultSaSecret = sa.Secrets[0].Name
|
||||
By("Creating a kubernetes client that impersonates a node")
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
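// Impersonate the node's identity; the node authorizer recognizes requests
// made with the "system:node:<nodeName>" username and the "system:nodes" group.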
config.Impersonate = restclient.ImpersonationConfig{
|
||||
UserName: asUser,
|
||||
Groups: []string{NodesGroup},
|
||||
}
|
||||
c, err = clientset.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
})
|
||||
It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
|
||||
_, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("Getting an existent secret should exit with the Forbidden error", func() {
|
||||
_, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("Getting a secret for a workload the node has access to should succeed", func() {
|
||||
By("Create a secret for testing")
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: ns,
|
||||
Name: "node-auth-secret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data": []byte("keep it secret"),
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Node should not get the secret")
|
||||
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
|
||||
By("Create a pod that use the secret")
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pause",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "pause",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
},
|
||||
},
|
||||
NodeName: nodeName,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "node-auth-secret",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: secret.Name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Pods(ns).Create(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("The node should able to access the secret")
|
||||
err = wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
|
||||
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("A node shouldn't be able to create an other node", func() {
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "foo"},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Node",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
}
|
||||
By(fmt.Sprintf("Create node foo by user: %v", asUser))
|
||||
_, err := c.CoreV1().Nodes().Create(node)
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
|
||||
It("A node shouldn't be able to delete an other node", func() {
|
||||
By(fmt.Sprintf("Create node foo by user: %v", asUser))
|
||||
err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{})
|
||||
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
|
||||
})
|
||||
})
|
316
vendor/k8s.io/kubernetes/test/e2e/auth/pod_security_policy.go
generated
vendored
Normal file
@ -0,0 +1,316 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
|
||||
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var (
|
||||
restrictivePSPTemplate = &extensionsv1beta1.PodSecurityPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "restrictive",
|
||||
Annotations: map[string]string{
|
||||
seccomp.AllowedProfilesAnnotationKey: "docker/default",
|
||||
seccomp.DefaultProfileAnnotationKey: "docker/default",
|
||||
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"kubernetes.io/cluster-service": "true",
|
||||
"addonmanager.kubernetes.io/mode": "Reconcile",
|
||||
},
|
||||
},
|
||||
Spec: extensionsv1beta1.PodSecurityPolicySpec{
|
||||
Privileged: false,
|
||||
AllowPrivilegeEscalation: boolPtr(false),
|
||||
RequiredDropCapabilities: []corev1.Capability{
|
||||
"AUDIT_WRITE",
|
||||
"CHOWN",
|
||||
"DAC_OVERRIDE",
|
||||
"FOWNER",
|
||||
"FSETID",
|
||||
"KILL",
|
||||
"MKNOD",
|
||||
"NET_RAW",
|
||||
"SETGID",
|
||||
"SETUID",
|
||||
"SYS_CHROOT",
|
||||
},
|
||||
Volumes: []extensionsv1beta1.FSType{
|
||||
extensionsv1beta1.ConfigMap,
|
||||
extensionsv1beta1.EmptyDir,
|
||||
extensionsv1beta1.PersistentVolumeClaim,
|
||||
"projected",
|
||||
extensionsv1beta1.Secret,
|
||||
},
|
||||
HostNetwork: false,
|
||||
HostIPC: false,
|
||||
HostPID: false,
|
||||
RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
|
||||
Rule: extensionsv1beta1.RunAsUserStrategyMustRunAsNonRoot,
|
||||
},
|
||||
SELinux: extensionsv1beta1.SELinuxStrategyOptions{
|
||||
Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
|
||||
},
|
||||
SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
|
||||
Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
|
||||
},
|
||||
FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
|
||||
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
|
||||
},
|
||||
ReadOnlyRootFilesystem: false,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("PodSecurityPolicy", func() {
|
||||
f := framework.NewDefaultFramework("podsecuritypolicy")
|
||||
f.SkipPrivilegedPSPBinding = true
|
||||
|
||||
// Client that will impersonate the default service account, in order to run
|
||||
// with reduced privileges.
|
||||
var c clientset.Interface
|
||||
var ns string // Test namespace, for convenience
|
||||
BeforeEach(func() {
|
||||
if !framework.IsPodSecurityPolicyEnabled(f) {
|
||||
framework.Skipf("PodSecurityPolicy not enabled")
|
||||
}
|
||||
if !framework.IsRBACEnabled(f) {
|
||||
framework.Skipf("RBAC not enabled")
|
||||
}
|
||||
ns = f.Namespace.Name
|
||||
|
||||
By("Creating a kubernetes client that impersonates the default service account")
|
||||
config, err := framework.LoadConfig()
|
||||
framework.ExpectNoError(err)
|
||||
config.Impersonate = restclient.ImpersonationConfig{
|
||||
UserName: serviceaccount.MakeUsername(ns, "default"),
|
||||
Groups: serviceaccount.MakeGroupNames(ns),
|
||||
}
|
||||
c, err = clientset.NewForConfig(config)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
By("Binding the edit role to the default SA")
|
||||
framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns,
|
||||
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"})
|
||||
})
|
||||
|
||||
It("should forbid pod creation when no PSP is available", func() {
|
||||
By("Running a restricted pod")
|
||||
_, err := c.Core().Pods(ns).Create(restrictedPod(f, "restricted"))
|
||||
expectForbidden(err)
|
||||
})
|
||||
|
||||
It("should enforce the restricted PodSecurityPolicy", func() {
|
||||
By("Creating & Binding a restricted policy for the test service account")
|
||||
_, cleanup := createAndBindPSP(f, restrictivePSPTemplate)
|
||||
defer cleanup()
|
||||
|
||||
By("Running a restricted pod")
|
||||
pod, err := c.Core().Pods(ns).Create(restrictedPod(f, "allowed"))
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
|
||||
|
||||
testPrivilegedPods(f, func(pod *v1.Pod) {
|
||||
_, err := c.Core().Pods(ns).Create(pod)
|
||||
expectForbidden(err)
|
||||
})
|
||||
})
|
||||
|
||||
It("should allow pods under the privileged PodSecurityPolicy", func() {
|
||||
By("Creating & Binding a privileged policy for the test service account")
|
||||
// Ensure that the permissive policy is used even in the presence of the restricted policy.
|
||||
_, cleanup := createAndBindPSP(f, restrictivePSPTemplate)
|
||||
defer cleanup()
|
||||
expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
|
||||
defer cleanup()
|
||||
|
||||
testPrivilegedPods(f, func(pod *v1.Pod) {
|
||||
p, err := c.Core().Pods(ns).Create(pod)
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
|
||||
|
||||
// Verify expected PSP was used.
|
||||
p, err = c.Core().Pods(ns).Get(p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
|
||||
Expect(found).To(BeTrue(), "PSP annotation not found")
|
||||
Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func expectForbidden(err error) {
|
||||
Expect(err).To(HaveOccurred(), "should be forbidden")
|
||||
Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
|
||||
}
|
||||
|
||||
func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
|
||||
By("Running a privileged pod", func() {
|
||||
privileged := restrictedPod(f, "privileged")
|
||||
privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
|
||||
privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
|
||||
tester(privileged)
|
||||
})
|
||||
|
||||
By("Running a HostPath pod", func() {
|
||||
hostpath := restrictedPod(f, "hostpath")
|
||||
hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
|
||||
Name: "hp",
|
||||
MountPath: "/hp",
|
||||
}}
|
||||
hostpath.Spec.Volumes = []v1.Volume{{
|
||||
Name: "hp",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
|
||||
},
|
||||
}}
|
||||
tester(hostpath)
|
||||
})
|
||||
|
||||
By("Running a HostNetwork pod", func() {
|
||||
hostnet := restrictedPod(f, "hostnet")
|
||||
hostnet.Spec.HostNetwork = true
|
||||
tester(hostnet)
|
||||
})
|
||||
|
||||
By("Running a HostPID pod", func() {
|
||||
hostpid := restrictedPod(f, "hostpid")
|
||||
hostpid.Spec.HostPID = true
|
||||
tester(hostpid)
|
||||
})
|
||||
|
||||
By("Running a HostIPC pod", func() {
|
||||
hostipc := restrictedPod(f, "hostipc")
|
||||
hostipc.Spec.HostIPC = true
|
||||
tester(hostipc)
|
||||
})
|
||||
|
||||
if common.IsAppArmorSupported() {
|
||||
By("Running a custom AppArmor profile pod", func() {
|
||||
aa := restrictedPod(f, "apparmor")
|
||||
// Every node is expected to have the docker-default profile.
|
||||
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
|
||||
tester(aa)
|
||||
})
|
||||
}
|
||||
|
||||
By("Running an unconfined Seccomp pod", func() {
|
||||
unconfined := restrictedPod(f, "seccomp")
|
||||
unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
|
||||
tester(unconfined)
|
||||
})
|
||||
|
||||
By("Running a CAP_SYS_ADMIN pod", func() {
|
||||
sysadmin := restrictedPod(f, "sysadmin")
|
||||
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
|
||||
Add: []v1.Capability{"CAP_SYS_ADMIN"},
|
||||
}
|
||||
sysadmin.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
|
||||
tester(sysadmin)
|
||||
})
|
||||
}
|
||||
|
||||
func createAndBindPSP(f *framework.Framework, pspTemplate *extensionsv1beta1.PodSecurityPolicy) (psp *extensionsv1beta1.PodSecurityPolicy, cleanup func()) {
|
||||
// Create the PodSecurityPolicy object.
|
||||
psp = pspTemplate.DeepCopy()
|
||||
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
|
||||
ns := f.Namespace.Name
|
||||
name := fmt.Sprintf("%s-%s", ns, psp.Name)
|
||||
psp.Name = name
|
||||
psp, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
|
||||
framework.ExpectNoError(err, "Failed to create PSP")
|
||||
|
||||
// Create the Role to bind it to the namespace.
|
||||
_, err = f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Rules: []rbacv1beta1.PolicyRule{{
|
||||
APIGroups: []string{"extensions"},
|
||||
Resources: []string{"podsecuritypolicies"},
|
||||
ResourceNames: []string{name},
|
||||
Verbs: []string{"use"},
|
||||
}},
|
||||
})
|
||||
framework.ExpectNoError(err, "Failed to create PSP role")
|
||||
|
||||
// Bind the role to the namespace.
|
||||
framework.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{
|
||||
Kind: rbacv1beta1.ServiceAccountKind,
|
||||
Namespace: ns,
|
||||
Name: "default",
|
||||
})
|
||||
framework.ExpectNoError(framework.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
|
||||
serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
|
||||
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
|
||||
|
||||
return psp, func() {
|
||||
// Cleanup non-namespaced PSP object.
|
||||
f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
|
||||
}
|
||||
}
|
||||
|
||||
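// restrictedPod returns a pod that satisfies the restrictive policy above:
// it disallows privilege escalation, runs as a non-root UID (65534), and
// requests the docker/default seccomp and runtime default AppArmor profiles.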
func restrictedPod(f *framework.Framework, name string) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: map[string]string{
|
||||
v1.SeccompPodAnnotationKey: "docker/default",
|
||||
apparmor.ContainerAnnotationKeyPrefix + "pause": apparmor.ProfileRuntimeDefault,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{
|
||||
Name: "pause",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
AllowPrivilegeEscalation: boolPtr(false),
|
||||
RunAsUser: intPtr(65534),
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func boolPtr(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
func intPtr(i int64) *int64 {
|
||||
return &i
|
||||
}
|
376
vendor/k8s.io/kubernetes/test/e2e/auth/service_accounts.go
generated
vendored
Normal file
@ -0,0 +1,376 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
|
||||
|
||||
var _ = SIGDescribe("ServiceAccounts", func() {
|
||||
f := framework.NewDefaultFramework("svcaccounts")
|
||||
|
||||
It("should ensure a single API token exists", func() {
|
||||
// wait for the service account to reference a single secret
|
||||
var secrets []v1.ObjectReference
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
|
||||
By("waiting for a single token reference")
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
framework.Logf("default service account was not found")
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
framework.Logf("error getting default service account: %v", err)
|
||||
return false, err
|
||||
}
|
||||
switch len(sa.Secrets) {
|
||||
case 0:
|
||||
framework.Logf("default service account has no secret references")
|
||||
return false, nil
|
||||
case 1:
|
||||
framework.Logf("default service account has a single secret reference")
|
||||
secrets = sa.Secrets
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
|
||||
}
|
||||
}))
|
||||
|
||||
// make sure the reference doesn't flutter
|
||||
{
|
||||
By("ensuring the single token reference persists")
|
||||
time.Sleep(2 * time.Second)
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
Expect(sa.Secrets).To(Equal(secrets))
|
||||
}
|
||||
|
||||
// delete the referenced secret
|
||||
By("deleting the service account token")
|
||||
framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil))
|
||||
|
||||
// wait for the referenced secret to be removed, and another one autocreated
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
|
||||
By("waiting for a new token reference")
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("error getting default service account: %v", err)
|
||||
return false, err
|
||||
}
|
||||
switch len(sa.Secrets) {
|
||||
case 0:
|
||||
framework.Logf("default service account has no secret references")
|
||||
return false, nil
|
||||
case 1:
|
||||
if sa.Secrets[0] == secrets[0] {
|
||||
framework.Logf("default service account still has the deleted secret reference")
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("default service account has a new single secret reference")
|
||||
secrets = sa.Secrets
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
|
||||
}
|
||||
}))
|
||||
|
||||
// make sure the reference doesn't flutter
|
||||
{
|
||||
By("ensuring the single token reference persists")
|
||||
time.Sleep(2 * time.Second)
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
Expect(sa.Secrets).To(Equal(secrets))
|
||||
}
|
||||
|
||||
// delete the reference from the service account
|
||||
By("deleting the reference to the service account token")
|
||||
{
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
sa.Secrets = nil
|
||||
_, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(sa)
|
||||
framework.ExpectNoError(updateErr)
|
||||
}
|
||||
|
||||
// wait for another one to be autocreated
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
|
||||
By("waiting for a new token to be created and added")
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("error getting default service account: %v", err)
|
||||
return false, err
|
||||
}
|
||||
switch len(sa.Secrets) {
|
||||
case 0:
|
||||
framework.Logf("default service account has no secret references")
|
||||
return false, nil
|
||||
case 1:
|
||||
framework.Logf("default service account has a new single secret reference")
|
||||
secrets = sa.Secrets
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("default service account has too many secret references: %#v", sa.Secrets)
|
||||
}
|
||||
}))
|
||||
|
||||
// make sure the reference doesn't flutter
|
||||
{
|
||||
By("ensuring the single token reference persists")
|
||||
time.Sleep(2 * time.Second)
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
Expect(sa.Secrets).To(Equal(secrets))
|
||||
}
|
||||
})
|
||||
|
||||
framework.ConformanceIt("should mount an API token into pods ", func() {
|
||||
var tokenContent string
|
||||
var rootCAContent string
|
||||
|
||||
// Standard get, update retry loop
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
|
||||
By("getting the auto-created API token")
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
framework.Logf("default service account was not found")
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
framework.Logf("error getting default service account: %v", err)
|
||||
return false, err
|
||||
}
|
||||
if len(sa.Secrets) == 0 {
|
||||
framework.Logf("default service account has no secret references")
|
||||
return false, nil
|
||||
}
|
||||
for _, secretRef := range sa.Secrets {
|
||||
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
|
||||
continue
|
||||
}
|
||||
if secret.Type == v1.SecretTypeServiceAccountToken {
|
||||
tokenContent = string(secret.Data[v1.ServiceAccountTokenKey])
|
||||
rootCAContent = string(secret.Data[v1.ServiceAccountRootCAKey])
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
framework.Logf("default service account has no secret references to valid service account tokens")
|
||||
return false, nil
|
||||
}))
|
||||
|
||||
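// The mounttest image prints the content of the requested file; each
// container below reads one projected key (token, root CA, namespace) from
// the default API token mount path so the output can be verified.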
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "pod-service-account-" + string(uuid.NewUUID()) + "-",
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "token-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey),
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "root-ca-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey),
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
|
||||
Name: "namespace-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
|
||||
},
|
||||
})
|
||||
|
||||
f.TestContainerOutput("consume service account token", pod, 0, []string{
|
||||
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey, tokenContent),
|
||||
})
|
||||
f.TestContainerOutput("consume service account root CA", pod, 1, []string{
|
||||
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey, rootCAContent),
|
||||
})
|
||||
|
||||
f.TestContainerOutput("consume service account namespace", pod, 2, []string{
|
||||
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
|
||||
})
|
||||
})
|
||||
|
||||
framework.ConformanceIt("should allow opting out of API token automount ", func() {
|
||||
var err error
|
||||
trueValue := true
|
||||
falseValue := false
|
||||
mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue}
|
||||
nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue}
|
||||
mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(mountSA)
|
||||
framework.ExpectNoError(err)
|
||||
nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(nomountSA)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Standard get, update retry loop
|
||||
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
|
||||
By("getting the auto-created API token")
|
||||
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
framework.Logf("mount service account was not found")
|
||||
return false, nil
|
||||
}
|
||||
if err != nil {
|
||||
framework.Logf("error getting mount service account: %v", err)
|
||||
return false, err
|
||||
}
|
||||
if len(sa.Secrets) == 0 {
|
||||
framework.Logf("mount service account has no secret references")
|
||||
return false, nil
|
||||
}
|
||||
for _, secretRef := range sa.Secrets {
|
||||
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
|
||||
continue
|
||||
}
|
||||
if secret.Type == v1.SecretTypeServiceAccountToken {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
framework.Logf("default service account has no secret references to valid service account tokens")
|
||||
return false, nil
|
||||
}))
|
||||
|
||||
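// Automount precedence: an explicit pod-spec setting wins over the service
// account's AutomountServiceAccountToken field, which in turn wins over the
// default behavior of mounting the token.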
testcases := []struct {
|
||||
PodName string
|
||||
ServiceAccountName string
|
||||
AutomountPodSpec *bool
|
||||
ExpectTokenVolume bool
|
||||
}{
|
||||
{
|
||||
PodName: "pod-service-account-defaultsa",
|
||||
ServiceAccountName: "default",
|
||||
AutomountPodSpec: nil,
|
||||
ExpectTokenVolume: true, // default is true
|
||||
},
|
||||
{
|
||||
PodName: "pod-service-account-mountsa",
|
||||
ServiceAccountName: mountSA.Name,
|
||||
AutomountPodSpec: nil,
|
||||
ExpectTokenVolume: true,
|
||||
},
|
||||
{
|
||||
PodName: "pod-service-account-nomountsa",
|
||||
ServiceAccountName: nomountSA.Name,
|
||||
AutomountPodSpec: nil,
|
||||
ExpectTokenVolume: false,
|
||||
},
|
||||
|
||||
// Make sure pod spec trumps when opting in
|
||||
{
|
||||
PodName: "pod-service-account-defaultsa-mountspec",
|
||||
ServiceAccountName: "default",
|
||||
AutomountPodSpec: &trueValue,
|
||||
ExpectTokenVolume: true,
|
||||
},
|
||||
{
|
||||
PodName: "pod-service-account-mountsa-mountspec",
|
||||
ServiceAccountName: mountSA.Name,
|
||||
AutomountPodSpec: &trueValue,
|
||||
ExpectTokenVolume: true,
|
||||
},
|
||||
{
|
||||
PodName: "pod-service-account-nomountsa-mountspec",
|
||||
ServiceAccountName: nomountSA.Name,
|
||||
AutomountPodSpec: &trueValue,
|
||||
ExpectTokenVolume: true, // pod spec trumps
|
||||
},
|
||||
|
||||
// Make sure pod spec trumps when opting out
|
||||
{
|
||||
PodName: "pod-service-account-defaultsa-nomountspec",
|
||||
ServiceAccountName: "default",
|
||||
AutomountPodSpec: &falseValue,
|
||||
ExpectTokenVolume: false, // pod spec trumps
|
||||
},
|
||||
{
|
||||
PodName: "pod-service-account-mountsa-nomountspec",
|
||||
ServiceAccountName: mountSA.Name,
|
||||
AutomountPodSpec: &falseValue,
|
||||
ExpectTokenVolume: false, // pod spec trumps
|
||||
},
|
||||
{
|
||||
PodName: "pod-service-account-nomountsa-nomountspec",
|
||||
ServiceAccountName: nomountSA.Name,
|
||||
AutomountPodSpec: &falseValue,
|
||||
ExpectTokenVolume: false, // pod spec trumps
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: tc.PodName},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Name: "token-test", Image: mountImage}},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
ServiceAccountName: tc.ServiceAccountName,
|
||||
AutomountServiceAccountToken: tc.AutomountPodSpec,
|
||||
},
|
||||
}
|
||||
createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("created pod %s", tc.PodName)
|
||||
|
||||
hasServiceAccountTokenVolume := false
|
||||
for _, c := range createdPod.Spec.Containers {
|
||||
for _, vm := range c.VolumeMounts {
|
||||
if vm.MountPath == serviceaccount.DefaultAPITokenMountPath {
|
||||
hasServiceAccountTokenVolume = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if hasServiceAccountTokenVolume != tc.ExpectTokenVolume {
|
||||
framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
|
||||
} else {
|
||||
framework.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
64
vendor/k8s.io/kubernetes/test/e2e/autoscaling/BUILD
generated
vendored
Normal file
@ -0,0 +1,64 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "autoscaling_timer.go",
        "cluster_autoscaler_scalability.go",
        "cluster_size_autoscaling.go",
        "custom_metrics_autoscaling.go",
        "dns_autoscaling.go",
        "framework.go",
        "horizontal_pod_autoscaling.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/autoscaling",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/instrumentation/monitoring:go_default_library",
        "//test/e2e/scheduling:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/golang.org/x/oauth2/google:go_default_library",
        "//vendor/google.golang.org/api/monitoring/v3:go_default_library",
        "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/scheduling/v1alpha1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
14
vendor/k8s.io/kubernetes/test/e2e/autoscaling/OWNERS
generated
vendored
Normal file
@ -0,0 +1,14 @@
reviewers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- wasylkowski
approvers:
- aleksandra-malinowska
- bskiba
- jszczepkowski
- MaciekPytel
- mwielgus
- wasylkowski
115
vendor/k8s.io/kubernetes/test/e2e/autoscaling/autoscaling_timer.go
generated
vendored
Normal file
@ -0,0 +1,115 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
|
||||
f := framework.NewDefaultFramework("autoscaling")
|
||||
|
||||
SIGDescribe("Autoscaling a service", func() {
|
||||
BeforeEach(func() {
|
||||
// Check if the Cluster Autoscaler is enabled by trying to get its ConfigMap.
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Skipf("test expects Cluster Autoscaler to be enabled")
|
||||
}
|
||||
})
|
||||
|
||||
Context("from 1 pod and 3 nodes to 8 pods and >=4 nodes", func() {
|
||||
const nodesNum = 3 // Expect there to be 3 nodes before and after the test.
|
||||
var nodeGroupName string // Set by BeforeEach, used by AfterEach to scale this node group down after the test.
|
||||
var nodes *v1.NodeList // Set by BeforeEach, used by Measure to calculate CPU request based on node's sizes.
|
||||
|
||||
BeforeEach(func() {
|
||||
// Make sure there is only 1 node group, otherwise this test becomes useless.
|
||||
nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
|
||||
if len(nodeGroups) != 1 {
|
||||
framework.Skipf("test expects 1 node group, found %d", len(nodeGroups))
|
||||
}
|
||||
nodeGroupName = nodeGroups[0]
|
||||
|
||||
// Make sure the node group has exactly 'nodesNum' nodes, otherwise this test becomes useless.
|
||||
nodeGroupSize, err := framework.GroupSize(nodeGroupName)
|
||||
framework.ExpectNoError(err)
|
||||
if nodeGroupSize != nodesNum {
|
||||
framework.Skipf("test expects %d nodes, found %d", nodesNum, nodeGroupSize)
|
||||
}
|
||||
|
||||
// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
|
||||
nodes = framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
schedulableCount := len(nodes.Items)
|
||||
Expect(schedulableCount).To(Equal(nodeGroupSize), "not all nodes are schedulable")
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
// Attempt cleanup only if a node group was targeted for scale up.
|
||||
// Otherwise the test was probably skipped and we'll get a gcloud error due to invalid parameters.
|
||||
if len(nodeGroupName) > 0 {
|
||||
// Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
|
||||
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
|
||||
}
|
||||
})
|
||||
|
||||
Measure("takes less than 15 minutes", func(b Benchmarker) {
|
||||
// Measured over multiple samples, scaling takes 10 +/- 2 minutes, so 15 minutes should be fully sufficient.
|
||||
const timeToWait = 15 * time.Minute
|
||||
|
||||
// Calculate the CPU request of the service.
|
||||
// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
|
||||
// Make it so that 'nodesNum' pods fit perfectly per node.
|
||||
nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
|
||||
nodeCpuMillis := (&nodeCpus).MilliValue()
|
||||
cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
|
||||
|
||||
// Start the service we want to scale and wait for it to be up and running.
|
||||
nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
|
||||
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
|
||||
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
|
||||
replicas := 1
|
||||
resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
|
||||
defer resourceConsumer.CleanUp()
|
||||
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
|
||||
|
||||
// Enable Horizontal Pod Autoscaler with 50% target utilization and
|
||||
// scale up the CPU usage to trigger autoscaling to 8 pods for target to be satisfied.
|
||||
targetCpuUtilizationPercent := int32(50)
|
||||
hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCpuUtilizationPercent, 1, 10)
|
||||
defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
|
||||
cpuLoad := 8 * cpuRequestMillis * int64(targetCpuUtilizationPercent) / 100 // 8 pods utilized to the target level
|
||||
resourceConsumer.ConsumeCPU(int(cpuLoad))
|
||||
|
||||
// Measure the time it takes for the service to scale to 8 pods with 50% CPU utilization each.
|
||||
b.Time("total scale-up time", func() {
|
||||
resourceConsumer.WaitForReplicas(8, timeToWait)
|
||||
})
|
||||
}, 1) // Increase to run the test more than once.
|
||||
})
|
||||
})
|
||||
})
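// Illustrative sketch (not part of the vendored test): with an assumed node size,
// the sizing arithmetic in the Measure block above works out as follows.
//
//	nodeCpuMillis    = 4000                       // hypothetical 4-core allocatable CPU
//	cpuRequestMillis = 4000 / 3            = 1333 // so roughly nodesNum pods fill one node
//	cpuLoad          = 8 * 1333 * 50 / 100 = 5332 // total millicores for 8 pods at the 50% target
//
// ConsumeCPU(5332) therefore drives the HPA towards 8 replicas which, as noted above,
// cannot all fit on the original 3 nodes and so force the Cluster Autoscaler to add a node.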
|
550
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_autoscaler_scalability.go
generated
vendored
Normal file
@ -0,0 +1,550 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
memoryReservationTimeout = 5 * time.Minute
|
||||
largeResizeTimeout = 8 * time.Minute
|
||||
largeScaleUpTimeout = 10 * time.Minute
|
||||
largeScaleDownTimeout = 20 * time.Minute
|
||||
minute = 1 * time.Minute
|
||||
|
||||
maxNodes = 1000
|
||||
)
|
||||
|
||||
type clusterPredicates struct {
|
||||
nodes int
|
||||
}
|
||||
|
||||
type scaleUpTestConfig struct {
|
||||
initialNodes int
|
||||
initialPods int
|
||||
extraPods *testutils.RCConfig
|
||||
expectedResult *clusterPredicates
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", func() {
|
||||
f := framework.NewDefaultFramework("autoscaling")
|
||||
var c clientset.Interface
|
||||
var nodeCount int
|
||||
var coresPerNode int
|
||||
var memCapacityMb int
|
||||
var originalSizes map[string]int
|
||||
var sum int
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke", "kubemark")
|
||||
|
||||
// Check if the Cluster Autoscaler is enabled by trying to get its ConfigMap.
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Skipf("test expects Cluster Autoscaler to be enabled")
|
||||
}
|
||||
|
||||
c = f.ClientSet
|
||||
if originalSizes == nil {
|
||||
originalSizes = make(map[string]int)
|
||||
sum = 0
|
||||
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
|
||||
size, err := framework.GroupSize(mig)
|
||||
framework.ExpectNoError(err)
|
||||
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
|
||||
originalSizes[mig] = size
|
||||
sum += size
|
||||
}
|
||||
}
|
||||
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
|
||||
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
nodeCount = len(nodes.Items)
|
||||
Expect(nodeCount).NotTo(BeZero())
|
||||
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
|
||||
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
|
||||
coresPerNode = int((&cpu).MilliValue() / 1000)
|
||||
memCapacityMb = int((&mem).Value() / 1024 / 1024)
|
||||
|
||||
Expect(nodeCount).Should(Equal(sum))
|
||||
|
||||
if framework.ProviderIs("gke") {
|
||||
val, err := isAutoscalerEnabled(3)
|
||||
framework.ExpectNoError(err)
|
||||
if !val {
|
||||
err = enableAutoscaler("default-pool", 3, 5)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
By(fmt.Sprintf("Restoring initial size of the cluster"))
|
||||
setMigSizes(originalSizes)
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
|
||||
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
s := time.Now()
|
||||
makeSchedulableLoop:
|
||||
for start := time.Now(); time.Since(start) < makeSchedulableTimeout; time.Sleep(makeSchedulableDelay) {
|
||||
for _, n := range nodes.Items {
|
||||
err = makeNodeSchedulable(c, &n, true)
|
||||
switch err.(type) {
|
||||
case CriticalAddonsOnlyError:
|
||||
continue makeSchedulableLoop
|
||||
default:
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
|
||||
})
|
||||
|
||||
It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
|
||||
perNodeReservation := int(float64(memCapacityMb) * 0.95)
|
||||
replicasPerNode := 10
|
||||
|
||||
additionalNodes := maxNodes - nodeCount
|
||||
replicas := additionalNodes * replicasPerNode
|
||||
additionalReservation := additionalNodes * perNodeReservation
|
||||
|
||||
// saturate cluster
|
||||
reservationCleanup := ReserveMemory(f, "some-pod", nodeCount*2, nodeCount*perNodeReservation, true, memoryReservationTimeout)
|
||||
defer reservationCleanup()
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
// configure pending pods & expected scale up
|
||||
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas, additionalReservation, largeScaleUpTimeout)
|
||||
expectedResult := createClusterPredicates(nodeCount + additionalNodes)
|
||||
config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult)
|
||||
|
||||
// run test
|
||||
testCleanup := simpleScaleUpTest(f, config)
|
||||
defer testCleanup()
|
||||
})
|
||||
|
||||
It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func() {
|
||||
perNodeReservation := int(float64(memCapacityMb) * 0.95)
|
||||
replicasPerNode := 10
|
||||
additionalNodes1 := int(math.Ceil(0.7 * maxNodes))
|
||||
additionalNodes2 := int(math.Ceil(0.25 * maxNodes))
|
||||
if additionalNodes1+additionalNodes2 > maxNodes {
|
||||
additionalNodes2 = maxNodes - additionalNodes1
|
||||
}
|
||||
|
||||
replicas1 := additionalNodes1 * replicasPerNode
|
||||
replicas2 := additionalNodes2 * replicasPerNode
|
||||
|
||||
glog.Infof("cores per node: %v", coresPerNode)
|
||||
|
||||
// saturate cluster
|
||||
initialReplicas := nodeCount
|
||||
reservationCleanup := ReserveMemory(f, "some-pod", initialReplicas, nodeCount*perNodeReservation, true, memoryReservationTimeout)
|
||||
defer reservationCleanup()
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
glog.Infof("Reserved successfully")
|
||||
|
||||
// configure pending pods & expected scale up #1
|
||||
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas1, additionalNodes1*perNodeReservation, largeScaleUpTimeout)
|
||||
expectedResult := createClusterPredicates(nodeCount + additionalNodes1)
|
||||
config := createScaleUpTestConfig(nodeCount, nodeCount, rcConfig, expectedResult)
|
||||
|
||||
// run test #1
|
||||
tolerateUnreadyNodes := additionalNodes1 / 20
|
||||
tolerateUnreadyPods := (initialReplicas + replicas1) / 20
|
||||
testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
|
||||
defer testCleanup1()
|
||||
|
||||
glog.Infof("Scaled up once")
|
||||
|
||||
// configure pending pods & expected scale up #2
|
||||
rcConfig2 := reserveMemoryRCConfig(f, "extra-pod-2", replicas2, additionalNodes2*perNodeReservation, largeScaleUpTimeout)
|
||||
expectedResult2 := createClusterPredicates(nodeCount + additionalNodes1 + additionalNodes2)
|
||||
config2 := createScaleUpTestConfig(nodeCount+additionalNodes1, nodeCount+additionalNodes2, rcConfig2, expectedResult2)
|
||||
|
||||
// run test #2
|
||||
tolerateUnreadyNodes = maxNodes / 20
|
||||
tolerateUnreadyPods = (initialReplicas + replicas1 + replicas2) / 20
|
||||
testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods)
|
||||
defer testCleanup2()
|
||||
|
||||
glog.Infof("Scaled up twice")
|
||||
})
|
||||
|
||||
It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
|
||||
perNodeReservation := int(float64(memCapacityMb) * 0.7)
|
||||
replicas := int(math.Ceil(maxNodes * 0.7))
|
||||
totalNodes := maxNodes
|
||||
|
||||
// resize cluster to totalNodes
|
||||
newSizes := map[string]int{
|
||||
anyKey(originalSizes): totalNodes,
|
||||
}
|
||||
setMigSizes(newSizes)
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
|
||||
|
||||
// run replicas
|
||||
rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
|
||||
expectedResult := createClusterPredicates(totalNodes)
|
||||
config := createScaleUpTestConfig(totalNodes, totalNodes, rcConfig, expectedResult)
|
||||
tolerateUnreadyNodes := totalNodes / 10
|
||||
tolerateUnreadyPods := replicas / 10
|
||||
testCleanup := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
|
||||
defer testCleanup()
|
||||
|
||||
// check if empty nodes are scaled down
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool {
|
||||
return size <= replicas+3 // leaving space for non-evictable kube-system pods
|
||||
}, scaleDownTimeout))
|
||||
})
|
||||
|
||||
It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func() {
|
||||
perPodReservation := int(float64(memCapacityMb) * 0.01)
|
||||
// underutilizedNodes are 10% full
|
||||
underutilizedPerNodeReplicas := 10
|
||||
// fullNodes are 70% full
|
||||
fullPerNodeReplicas := 70
|
||||
totalNodes := maxNodes
|
||||
underutilizedRatio := 0.3
|
||||
maxDelta := 30
|
||||
|
||||
// resize cluster to totalNodes
|
||||
newSizes := map[string]int{
|
||||
anyKey(originalSizes): totalNodes,
|
||||
}
|
||||
setMigSizes(newSizes)
|
||||
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
|
||||
|
||||
// annotate all nodes with no-scale-down
|
||||
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
|
||||
|
||||
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{
|
||||
FieldSelector: fields.Set{
|
||||
"spec.unschedulable": "false",
|
||||
}.AsSelector().String(),
|
||||
})
|
||||
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "true"))
|
||||
|
||||
// distribute pods using replication controllers taking up space that should
|
||||
// be empty after pods are distributed
|
||||
underutilizedNodesNum := int(float64(maxNodes) * underutilizedRatio)
|
||||
fullNodesNum := totalNodes - underutilizedNodesNum
|
||||
|
||||
podDistribution := []podBatch{
|
||||
{numNodes: fullNodesNum, podsPerNode: fullPerNodeReplicas},
|
||||
{numNodes: underutilizedNodesNum, podsPerNode: underutilizedPerNodeReplicas}}
|
||||
|
||||
cleanup := distributeLoad(f, f.Namespace.Name, "10-70", podDistribution, perPodReservation,
|
||||
int(0.95*float64(memCapacityMb)), map[string]string{}, largeScaleUpTimeout)
|
||||
defer cleanup()
|
||||
|
||||
// enable scale down again
|
||||
framework.ExpectNoError(addAnnotation(f, nodes.Items, ScaleDownDisabledKey, "false"))
|
||||
|
||||
// wait for scale down to start. Node deletion takes a long time, so we just
|
||||
// wait for maximum of 30 nodes deleted
|
||||
nodesToScaleDownCount := int(float64(totalNodes) * 0.1)
|
||||
if nodesToScaleDownCount > maxDelta {
|
||||
nodesToScaleDownCount = maxDelta
|
||||
}
|
||||
expectedSize := totalNodes - nodesToScaleDownCount
|
||||
timeout := time.Duration(nodesToScaleDownCount)*time.Minute + scaleDownTimeout
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet, func(size int) bool {
|
||||
return size <= expectedSize
|
||||
}, timeout))
|
||||
})
|
||||
|
||||
It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func() {
|
||||
fullReservation := int(float64(memCapacityMb) * 0.9)
|
||||
hostPortPodReservation := int(float64(memCapacityMb) * 0.3)
|
||||
totalNodes := maxNodes
|
||||
reservedPort := 4321
|
||||
|
||||
// resize cluster to totalNodes
|
||||
newSizes := map[string]int{
|
||||
anyKey(originalSizes): totalNodes,
|
||||
}
|
||||
setMigSizes(newSizes)
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
|
||||
divider := int(float64(totalNodes) * 0.7)
|
||||
fullNodesCount := divider
|
||||
underutilizedNodesCount := totalNodes - fullNodesCount
|
||||
|
||||
By("Reserving full nodes")
|
||||
// run RC1 w/o host port
|
||||
cleanup := ReserveMemory(f, "filling-pod", fullNodesCount, fullNodesCount*fullReservation, true, largeScaleUpTimeout*2)
|
||||
defer cleanup()
|
||||
|
||||
By("Reserving host ports on remaining nodes")
|
||||
// run RC2 w/ host port
|
||||
cleanup2 := createHostPortPodsWithMemory(f, "underutilizing-host-port-pod", underutilizedNodesCount, reservedPort, underutilizedNodesCount*hostPortPodReservation, largeScaleUpTimeout)
|
||||
defer cleanup2()
|
||||
|
||||
waitForAllCaPodsReadyInNamespace(f, c)
|
||||
// wait and check scale down doesn't occur
|
||||
By(fmt.Sprintf("Sleeping %v minutes...", scaleDownTimeout.Minutes()))
|
||||
time.Sleep(scaleDownTimeout)
|
||||
|
||||
By("Checking if the number of nodes is as expected")
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
glog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
|
||||
Expect(len(nodes.Items)).Should(Equal(totalNodes))
|
||||
})
|
||||
|
||||
Specify("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func() {
|
||||
// Start a number of pods saturating existing nodes.
|
||||
perNodeReservation := int(float64(memCapacityMb) * 0.80)
|
||||
replicasPerNode := 10
|
||||
initialPodReplicas := nodeCount * replicasPerNode
|
||||
initialPodsTotalMemory := nodeCount * perNodeReservation
|
||||
reservationCleanup := ReserveMemory(f, "initial-pod", initialPodReplicas, initialPodsTotalMemory, true /* wait for pods to run */, memoryReservationTimeout)
|
||||
defer reservationCleanup()
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
|
||||
|
||||
// Configure a number of unschedulable pods.
|
||||
unschedulableMemReservation := memCapacityMb * 2
|
||||
unschedulablePodReplicas := 1000
|
||||
totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
|
||||
timeToWait := 5 * time.Minute
|
||||
podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
|
||||
framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
|
||||
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, podsConfig.Name)
|
||||
|
||||
// Ensure that no new nodes have been added so far.
|
||||
Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
|
||||
|
||||
// Start a number of schedulable pods to ensure CA reacts.
|
||||
additionalNodes := maxNodes - nodeCount
|
||||
replicas := additionalNodes * replicasPerNode
|
||||
totalMemory := additionalNodes * perNodeReservation
|
||||
rcConfig := reserveMemoryRCConfig(f, "extra-pod", replicas, totalMemory, largeScaleUpTimeout)
|
||||
expectedResult := createClusterPredicates(nodeCount + additionalNodes)
|
||||
config := createScaleUpTestConfig(nodeCount, initialPodReplicas, rcConfig, expectedResult)
|
||||
|
||||
// Test that scale up happens, allowing 1000 unschedulable pods not to be scheduled.
|
||||
testCleanup := simpleScaleUpTestWithTolerance(f, config, 0, unschedulablePodReplicas)
|
||||
defer testCleanup()
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
func makeUnschedulable(f *framework.Framework, nodes []v1.Node) error {
|
||||
for _, node := range nodes {
|
||||
err := makeNodeUnschedulable(f.ClientSet, &node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeSchedulable(f *framework.Framework, nodes []v1.Node) error {
|
||||
for _, node := range nodes {
|
||||
err := makeNodeSchedulable(f.ClientSet, &node, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func anyKey(input map[string]int) string {
|
||||
for k := range input {
|
||||
return k
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestConfig, tolerateMissingNodeCount int, tolerateMissingPodCount int) func() error {
|
||||
// resize cluster to start size
|
||||
// run rc based on config
|
||||
By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
|
||||
start := time.Now()
|
||||
framework.ExpectNoError(framework.RunRC(*config.extraPods))
|
||||
// check results
|
||||
if tolerateMissingNodeCount > 0 {
|
||||
// Tolerate some number of nodes not to be created.
|
||||
minExpectedNodeCount := config.expectedResult.nodes - tolerateMissingNodeCount
|
||||
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
|
||||
func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
|
||||
} else {
|
||||
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
|
||||
}
|
||||
glog.Infof("cluster is increased")
|
||||
if tolerateMissingPodCount > 0 {
|
||||
framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount))
|
||||
} else {
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
|
||||
}
|
||||
timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, config.extraPods.Name)
|
||||
}
|
||||
}
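// Example (values mirroring the scalability tests above): a 5% tolerance is
// expressed as count/20, e.g. when adding 700 nodes with 10 pods per node:
//
//	tolerateUnreadyNodes := 700 / 20                      // up to 35 nodes may be missing
//	tolerateUnreadyPods := (initialReplicas + 7000) / 20  // and roughly 5% of pods may be unready
//	testCleanup := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
//	defer testCleanup()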
|
||||
|
||||
func simpleScaleUpTest(f *framework.Framework, config *scaleUpTestConfig) func() error {
|
||||
return simpleScaleUpTestWithTolerance(f, config, 0, 0)
|
||||
}
|
||||
|
||||
func reserveMemoryRCConfig(f *framework.Framework, id string, replicas, megabytes int, timeout time.Duration) *testutils.RCConfig {
|
||||
return &testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
InternalClient: f.InternalClientset,
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: timeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Replicas: replicas,
|
||||
MemRequest: int64(1024 * 1024 * megabytes / replicas),
|
||||
}
|
||||
}
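// Note (illustrative): MemRequest is the per-replica share of the requested total, so
// reserveMemoryRCConfig(f, "extra-pod", 100, 50000, timeout) would create 100 pause pods,
// each requesting 1024*1024*50000/100 bytes (500 MB), together reserving roughly 50000 MB.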
|
||||
|
||||
func createScaleUpTestConfig(nodes, pods int, extraPods *testutils.RCConfig, expectedResult *clusterPredicates) *scaleUpTestConfig {
|
||||
return &scaleUpTestConfig{
|
||||
initialNodes: nodes,
|
||||
initialPods: pods,
|
||||
extraPods: extraPods,
|
||||
expectedResult: expectedResult,
|
||||
}
|
||||
}
|
||||
|
||||
func createClusterPredicates(nodes int) *clusterPredicates {
|
||||
return &clusterPredicates{
|
||||
nodes: nodes,
|
||||
}
|
||||
}
|
||||
|
||||
func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) error {
|
||||
for _, node := range nodes {
|
||||
oldData, err := json.Marshal(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if node.Annotations == nil {
|
||||
node.Annotations = make(map[string]string)
|
||||
}
|
||||
node.Annotations[key] = value
|
||||
|
||||
newData, err := json.Marshal(node)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.ClientSet.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
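// Example (as used by the scale-down scalability test above): addAnnotation patches every
// node with a strategic merge patch to toggle the autoscaler's per-node scale-down opt-out.
//
//	key := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
//	framework.ExpectNoError(addAnnotation(f, nodes.Items, key, "true"))  // freeze scale-down
//	// ... distribute the load ...
//	framework.ExpectNoError(addAnnotation(f, nodes.Items, key, "false")) // allow scale-down again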
|
||||
|
||||
func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, port, megabytes int, timeout time.Duration) func() error {
|
||||
By(fmt.Sprintf("Running RC which reserves host port and memory"))
|
||||
request := int64(1024 * 1024 * megabytes / replicas)
|
||||
config := &testutils.RCConfig{
|
||||
Client: f.ClientSet,
|
||||
InternalClient: f.InternalClientset,
|
||||
Name: id,
|
||||
Namespace: f.Namespace.Name,
|
||||
Timeout: timeout,
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Replicas: replicas,
|
||||
HostPorts: map[string]int{"port1": port},
|
||||
MemRequest: request,
|
||||
}
|
||||
err := framework.RunRC(*config)
|
||||
framework.ExpectNoError(err)
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
|
||||
}
|
||||
}
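// Note (illustrative): because every replica requests the same HostPort, the scheduler can
// place at most one of these pods per node. For example, assuming 5 schedulable nodes,
//	cleanup := createHostPortPodsWithMemory(f, "filler", 5, 8013, 5*300, timeout)
// pins exactly one 300 MB filler pod onto each of the 5 nodes.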
|
||||
|
||||
type podBatch struct {
|
||||
numNodes int
|
||||
podsPerNode int
|
||||
}
|
||||
|
||||
// distributeLoad distributes the pods in the way described by podDistribution,
// assuming all pods will have the same memory reservation and all nodes the same
// memory capacity. This allows us to generate the load on the cluster in the exact
// way that we want.
//
// To achieve this we do the following:
// 1. Create replication controllers that eat up all the space that should be
//    empty after setup, making sure they end up on different nodes by specifying
//    conflicting host ports.
// 2. Create the target RC that will generate the load on the cluster.
// 3. Remove the RCs created in 1.
|
||||
func distributeLoad(f *framework.Framework, namespace string, id string, podDistribution []podBatch,
|
||||
podMemRequestMegabytes int, nodeMemCapacity int, labels map[string]string, timeout time.Duration) func() error {
|
||||
port := 8013
|
||||
// Create load-distribution RCs with one pod per node, reserving all remaining
|
||||
// memory to force the distribution of pods for the target RCs.
|
||||
// The load-distribution RCs will be deleted on function return.
|
||||
totalPods := 0
|
||||
for i, podBatch := range podDistribution {
|
||||
totalPods += podBatch.numNodes * podBatch.podsPerNode
|
||||
remainingMem := nodeMemCapacity - podBatch.podsPerNode*podMemRequestMegabytes
|
||||
replicas := podBatch.numNodes
|
||||
cleanup := createHostPortPodsWithMemory(f, fmt.Sprintf("load-distribution%d", i), replicas, port, remainingMem*replicas, timeout)
|
||||
defer cleanup()
|
||||
}
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
|
||||
// Create the target RC
|
||||
rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
|
||||
framework.ExpectNoError(framework.RunRC(*rcConfig))
|
||||
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
|
||||
return func() error {
|
||||
return framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, id)
|
||||
}
|
||||
}
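// Worked example (assumed values): with nodeMemCapacity = 1000 MB, podMemRequestMegabytes = 10
// and a batch {numNodes: 4, podsPerNode: 70}, each load-distribution pod reserves
// 1000 - 70*10 = 300 MB and is pinned one-per-node by the conflicting host port; the target RC
// then runs 4*70 = 280 pods of 10 MB each, which only fit by packing 70 pods onto each node.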
|
||||
|
||||
func timeTrack(start time.Time, name string) {
|
||||
elapsed := time.Since(start)
|
||||
glog.Infof("%s took %s", name, elapsed)
|
||||
}
|
1970
vendor/k8s.io/kubernetes/test/e2e/autoscaling/cluster_size_autoscaling.go
generated
vendored
Normal file
File diff suppressed because it is too large
210
vendor/k8s.io/kubernetes/test/e2e/autoscaling/custom_metrics_autoscaling.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2/google"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
gcm "google.golang.org/api/monitoring/v3"
|
||||
as "k8s.io/api/autoscaling/v2beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
|
||||
)
|
||||
|
||||
const (
|
||||
stackdriverExporterDeployment = "stackdriver-exporter-deployment"
|
||||
dummyDeploymentName = "dummy-deployment"
|
||||
stackdriverExporterPod = "stackdriver-exporter-pod"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
})
|
||||
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
var kubeClient clientset.Interface
|
||||
|
||||
It("should autoscale with Custom Metrics from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
|
||||
kubeClient = f.ClientSet
|
||||
testHPA(f, kubeClient)
|
||||
})
|
||||
})
|
||||
|
||||
func testHPA(f *framework.Framework, kubeClient clientset.Interface) {
|
||||
projectId := framework.TestContext.CloudConfig.ProjectID
|
||||
|
||||
ctx := context.Background()
|
||||
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
|
||||
|
||||
// Hack for running tests locally, needed to authenticate in Stackdriver.
// If this is your use case, create application default credentials:
// $ gcloud auth application-default login
// and uncomment the following lines:
|
||||
/*
|
||||
ts, err := google.DefaultTokenSource(oauth2.NoContext)
|
||||
framework.Logf("Couldn't get application default credentials, %v", err)
|
||||
if err != nil {
|
||||
framework.Failf("Error accessing application default credentials, %v", err)
|
||||
}
|
||||
client := oauth2.NewClient(oauth2.NoContext, ts)
|
||||
*/
|
||||
|
||||
gcmService, err := gcm.New(client)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create gcm service, %v", err)
|
||||
}
|
||||
|
||||
// Set up a cluster: create a custom metric and set up k8s-sd adapter
|
||||
err = monitoring.CreateDescriptors(gcmService, projectId)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create metric descriptor: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupDescriptors(gcmService, projectId)
|
||||
|
||||
err = monitoring.CreateAdapter()
|
||||
if err != nil {
|
||||
framework.Failf("Failed to set up: %v", err)
|
||||
}
|
||||
defer monitoring.CleanupAdapter()
|
||||
|
||||
// Run application that exports the metric
|
||||
err = createDeploymentsToScale(f, kubeClient)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
|
||||
}
|
||||
defer cleanupDeploymentsToScale(f, kubeClient)
|
||||
|
||||
// Autoscale the deployments
|
||||
err = createPodsHPA(f, kubeClient)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create 'Pods' HPA: %v", err)
|
||||
}
|
||||
err = createObjectHPA(f, kubeClient)
|
||||
if err != nil {
|
||||
framework.Failf("Failed to create 'Objects' HPA: %v", err)
|
||||
}
|
||||
|
||||
waitForReplicas(stackdriverExporterDeployment, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
|
||||
waitForReplicas(dummyDeploymentName, f.Namespace.ObjectMeta.Name, kubeClient, 15*time.Minute, 1)
|
||||
}
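// Note (illustrative of the expectation above): the exporter pods publish a constant custom
// metric value of 100 while both HPAs target 200, so, roughly following the HPA rule
// desiredReplicas = ceil(currentReplicas * currentValue / targetValue), e.g. ceil(2*100/200) = 1,
// both scale targets are expected to shrink to a single replica within the 15-minute wait.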
|
||||
|
||||
func createDeploymentsToScale(f *framework.Framework, cs clientset.Interface) error {
|
||||
_, err := cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(stackdriverExporterDeployment, f.Namespace.Name, 2, 100))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, 100))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Create(monitoring.StackdriverExporterDeployment(dummyDeploymentName, f.Namespace.Name, 2, 100))
|
||||
return err
|
||||
}
|
||||
|
||||
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface) {
|
||||
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterDeployment, &metav1.DeleteOptions{})
|
||||
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(stackdriverExporterPod, &metav1.DeleteOptions{})
|
||||
_ = cs.Extensions().Deployments(f.Namespace.ObjectMeta.Name).Delete(dummyDeploymentName, &metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func createPodsHPA(f *framework.Framework, cs clientset.Interface) error {
|
||||
var minReplicas int32 = 1
|
||||
_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-pods-hpa",
|
||||
Namespace: f.Namespace.ObjectMeta.Name,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: []as.MetricSpec{
|
||||
{
|
||||
Type: as.PodsMetricSourceType,
|
||||
Pods: &as.PodsMetricSource{
|
||||
MetricName: monitoring.CustomMetricName,
|
||||
TargetAverageValue: *resource.NewQuantity(200, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: stackdriverExporterDeployment,
|
||||
},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func createObjectHPA(f *framework.Framework, cs clientset.Interface) error {
|
||||
var minReplicas int32 = 1
|
||||
_, err := cs.AutoscalingV2beta1().HorizontalPodAutoscalers(f.Namespace.ObjectMeta.Name).Create(&as.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "custom-metrics-objects-hpa",
|
||||
Namespace: f.Namespace.ObjectMeta.Name,
|
||||
},
|
||||
Spec: as.HorizontalPodAutoscalerSpec{
|
||||
Metrics: []as.MetricSpec{
|
||||
{
|
||||
Type: as.ObjectMetricSourceType,
|
||||
Object: &as.ObjectMetricSource{
|
||||
MetricName: monitoring.CustomMetricName,
|
||||
Target: as.CrossVersionObjectReference{
|
||||
Kind: "Pod",
|
||||
Name: stackdriverExporterPod,
|
||||
},
|
||||
TargetValue: *resource.NewQuantity(200, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
MaxReplicas: 3,
|
||||
MinReplicas: &minReplicas,
|
||||
ScaleTargetRef: as.CrossVersionObjectReference{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
Name: dummyDeploymentName,
|
||||
},
|
||||
},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
|
||||
interval := 20 * time.Second
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
deployment, err := cs.Extensions().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
|
||||
}
|
||||
replicas := int(deployment.Status.ReadyReplicas)
|
||||
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
|
||||
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
|
||||
})
|
||||
if err != nil {
|
||||
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
|
||||
}
|
||||
}
|
365
vendor/k8s.io/kubernetes/test/e2e/autoscaling/dns_autoscaling.go
generated
vendored
Normal file
@ -0,0 +1,365 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
DNSdefaultTimeout = 5 * time.Minute
|
||||
ClusterAddonLabelKey = "k8s-app"
|
||||
DNSLabelName = "kube-dns"
|
||||
DNSAutoscalerLabelName = "kube-dns-autoscaler"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("DNS horizontal autoscaling", func() {
|
||||
f := framework.NewDefaultFramework("dns-autoscaling")
|
||||
var c clientset.Interface
|
||||
var previousParams map[string]string
|
||||
var originDNSReplicasCount int
|
||||
var DNSParams_1 DNSParamsLinear
|
||||
var DNSParams_2 DNSParamsLinear
|
||||
var DNSParams_3 DNSParamsLinear
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessProviderIs("gce", "gke")
|
||||
c = f.ClientSet
|
||||
|
||||
nodeCount := len(framework.GetReadySchedulableNodesOrDie(c).Items)
|
||||
Expect(nodeCount).NotTo(BeZero())
|
||||
|
||||
By("Collecting original replicas count and DNS scaling params")
|
||||
var err error
|
||||
originDNSReplicasCount, err = getDNSReplicas(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
pcm, err := fetchDNSScalingConfigMap(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
previousParams = pcm.Data
|
||||
|
||||
if nodeCount <= 500 {
|
||||
DNSParams_1 = DNSParamsLinear{
|
||||
nodesPerReplica: 1,
|
||||
}
|
||||
DNSParams_2 = DNSParamsLinear{
|
||||
nodesPerReplica: 2,
|
||||
}
|
||||
DNSParams_3 = DNSParamsLinear{
|
||||
nodesPerReplica: 3,
|
||||
coresPerReplica: 3,
|
||||
}
|
||||
} else {
|
||||
// In large clusters, avoid creating/deleting too many DNS pods;
// this is supposed to be a correctness test, not a performance one.
// The default setup is: 256 cores/replica, 16 nodes/replica.
// With nodeCount > 500, nodes/13, nodes/14, nodes/15 and nodes/16
// are different numbers.
|
||||
DNSParams_1 = DNSParamsLinear{
|
||||
nodesPerReplica: 13,
|
||||
}
|
||||
DNSParams_2 = DNSParamsLinear{
|
||||
nodesPerReplica: 14,
|
||||
}
|
||||
DNSParams_3 = DNSParamsLinear{
|
||||
nodesPerReplica: 15,
|
||||
coresPerReplica: 15,
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// This test is separated because it is slow and needs to run serially.
// It will take around 5 minutes to run on a 4-node cluster.
|
||||
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
|
||||
|
||||
By("Replace the dns autoscaling parameters with testing parameters")
|
||||
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer func() {
|
||||
By("Restoring intial dns autoscaling parameters")
|
||||
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for number of running and ready kube-dns pods recover")
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
|
||||
_, err := framework.WaitForPodsWithLabelRunningReady(c, metav1.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}()
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
originalSizes := make(map[string]int)
|
||||
sum := 0
|
||||
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
|
||||
size, err := framework.GroupSize(mig)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
|
||||
originalSizes[mig] = size
|
||||
sum += size
|
||||
}
|
||||
|
||||
By("Manually increase cluster size")
|
||||
increasedSize := 0
|
||||
increasedSizes := make(map[string]int)
|
||||
for key, val := range originalSizes {
|
||||
increasedSizes[key] = val + 1
|
||||
increasedSize += increasedSizes[key]
|
||||
}
|
||||
setMigSizes(increasedSizes)
|
||||
Expect(WaitForClusterSizeFunc(c,
|
||||
func(size int) bool { return size == increasedSize }, scaleUpTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("Restoring cluster size")
|
||||
setMigSizes(originalSizes)
|
||||
Expect(framework.WaitForReadyNodes(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
|
||||
|
||||
By("Replace the dns autoscaling parameters with testing parameters")
|
||||
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer func() {
|
||||
By("Restoring intial dns autoscaling parameters")
|
||||
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
|
||||
}()
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("--- Scenario: should scale kube-dns based on changed parameters ---")
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
|
||||
By("Delete the ConfigMap for autoscaler")
|
||||
err = deleteDNSScalingConfigMap(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Wait for the ConfigMap got re-created")
|
||||
_, err = waitForDNSConfigMapCreated(c, DNSdefaultTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_2)))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_2)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
|
||||
By("--- Scenario: should recover after autoscaler pod got deleted ---")
|
||||
By("Delete the autoscaler pod for kube-dns")
|
||||
Expect(deleteDNSAutoscalerPod(c)).NotTo(HaveOccurred())
|
||||
|
||||
By("Replace the dns autoscaling parameters with another testing parameters")
|
||||
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("Wait for kube-dns scaled to expected number")
|
||||
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
|
||||
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
type DNSParamsLinear struct {
|
||||
nodesPerReplica float64
|
||||
coresPerReplica float64
|
||||
min int
|
||||
max int
|
||||
}
|
||||
|
||||
type getExpectReplicasFunc func(c clientset.Interface) int
|
||||
|
||||
func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
|
||||
return func(c clientset.Interface) int {
|
||||
var replicasFromNodes float64
|
||||
var replicasFromCores float64
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(c).Items
|
||||
if params.nodesPerReplica > 0 {
|
||||
replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
|
||||
}
|
||||
if params.coresPerReplica > 0 {
|
||||
replicasFromCores = math.Ceil(float64(getScheduableCores(nodes)) / params.coresPerReplica)
|
||||
}
|
||||
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
|
||||
}
|
||||
}
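// Worked example (assumed cluster): with 8 schedulable nodes of 4 cores each and
// DNSParamsLinear{nodesPerReplica: 3, coresPerReplica: 3}, the closure above returns
//	replicasFromNodes = ceil(8/3)  = 3
//	replicasFromCores = ceil(32/3) = 11
//	expected replicas = max(1, max(3, 11)) = 11
// i.e. the larger of the node-based and core-based estimates wins.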
|
||||
|
||||
func getScheduableCores(nodes []v1.Node) int64 {
|
||||
var sc resource.Quantity
|
||||
for _, node := range nodes {
|
||||
if !node.Spec.Unschedulable {
|
||||
sc.Add(node.Status.Capacity[v1.ResourceCPU])
|
||||
}
|
||||
}
|
||||
|
||||
scInt64, scOk := sc.AsInt64()
|
||||
if !scOk {
|
||||
framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
|
||||
return 0
|
||||
}
|
||||
return scInt64
|
||||
}
|
||||
|
||||
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
|
||||
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cm, nil
|
||||
}
|
||||
|
||||
func deleteDNSScalingConfigMap(c clientset.Interface) error {
|
||||
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
framework.Logf("DNS autoscaling ConfigMap deleted.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func packLinearParams(params *DNSParamsLinear) map[string]string {
|
||||
paramsMap := make(map[string]string)
|
||||
paramsMap["linear"] = fmt.Sprintf("{\"nodesPerReplica\": %v,\"coresPerReplica\": %v,\"min\": %v,\"max\": %v}",
|
||||
params.nodesPerReplica,
|
||||
params.coresPerReplica,
|
||||
params.min,
|
||||
params.max)
|
||||
return paramsMap
|
||||
}
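// Example (illustrative): packLinearParams(&DNSParamsLinear{nodesPerReplica: 3, coresPerReplica: 3})
// yields the single "linear" key consumed from the kube-dns-autoscaler ConfigMap:
//	{"nodesPerReplica": 3,"coresPerReplica": 3,"min": 0,"max": 0}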
|
||||
|
||||
func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
|
||||
configMap := v1.ConfigMap{}
|
||||
configMap.ObjectMeta.Name = DNSAutoscalerLabelName
|
||||
configMap.ObjectMeta.Namespace = metav1.NamespaceSystem
|
||||
configMap.Data = params
|
||||
return &configMap
|
||||
}
|
||||
|
||||
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
|
||||
_, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(configMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
framework.Logf("DNS autoscaling ConfigMap updated.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDNSReplicas(c clientset.Interface) (int, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
|
||||
listOpts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
deployments, err := c.ExtensionsV1beta1().Deployments(metav1.NamespaceSystem).List(listOpts)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if len(deployments.Items) != 1 {
|
||||
return 0, fmt.Errorf("expected 1 DNS deployment, got %v", len(deployments.Items))
|
||||
}
|
||||
|
||||
deployment := deployments.Items[0]
|
||||
return int(*(deployment.Spec.Replicas)), nil
|
||||
}
|
||||
|
||||
func deleteDNSAutoscalerPod(c clientset.Interface) error {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
|
||||
listOpts := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(pods.Items) != 1 {
|
||||
return fmt.Errorf("expected 1 autoscaler pod, got %v", len(pods.Items))
|
||||
}
|
||||
|
||||
podName := pods.Items[0].Name
|
||||
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
framework.Logf("DNS autoscaling pod %v deleted.", podName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
|
||||
var current int
|
||||
var expected int
|
||||
framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
|
||||
condition := func() (bool, error) {
|
||||
current, err = getDNSReplicas(c)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
expected = getExpected(c)
|
||||
if current != expected {
|
||||
framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
|
||||
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
|
||||
}
|
||||
framework.Logf("kube-dns reaches expected replicas: %v", expected)
|
||||
return nil
|
||||
}
|
||||
|
||||
func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
|
||||
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
|
||||
condition := func() (bool, error) {
|
||||
configMap, err = fetchDNSScalingConfigMap(c)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if err = wait.Poll(time.Second, timeout, condition); err != nil {
|
||||
return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %v", err)
|
||||
}
|
||||
return configMap, nil
|
||||
}
|
23
vendor/k8s.io/kubernetes/test/e2e/autoscaling/framework.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package autoscaling

import "github.com/onsi/ginkgo"

func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-autoscaling] "+text, body)
}
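// Example: SIGDescribe("DNS horizontal autoscaling", func() { ... }) registers the container
// as "[sig-autoscaling] DNS horizontal autoscaling", the prefix shared by every spec in this
// package.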
172
vendor/k8s.io/kubernetes/test/e2e/autoscaling/horizontal_pod_autoscaling.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package autoscaling
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
// These tests don't seem to be running properly in parallel: issue: #20338.
|
||||
//
|
||||
var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
|
||||
var rc *common.ResourceConsumer
|
||||
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
|
||||
|
||||
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
|
||||
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
|
||||
|
||||
SIGDescribe("[Serial] [Slow] Deployment", func() {
|
||||
// CPU tests via deployments
|
||||
It(titleUp, func() {
|
||||
scaleUp("test-deployment", common.KindDeployment, false, rc, f)
|
||||
})
|
||||
It(titleDown, func() {
|
||||
scaleDown("test-deployment", common.KindDeployment, false, rc, f)
|
||||
})
|
||||
})
|
||||
|
||||
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
|
||||
// CPU tests via replica sets
|
||||
It(titleUp, func() {
|
||||
scaleUp("rs", common.KindReplicaSet, false, rc, f)
|
||||
})
|
||||
It(titleDown, func() {
|
||||
scaleDown("rs", common.KindReplicaSet, false, rc, f)
|
||||
})
|
||||
})
|
||||
|
||||
// These tests take ~20 minutes each.
|
||||
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
|
||||
// CPU tests via replication controllers
|
||||
It(titleUp+" and verify decision stability", func() {
|
||||
scaleUp("rc", common.KindRC, true, rc, f)
|
||||
})
|
||||
It(titleDown+" and verify decision stability", func() {
|
||||
scaleDown("rc", common.KindRC, true, rc, f)
|
||||
})
|
||||
})
|
||||
|
||||
// TODO: Get rid of [DisabledForLargeClusters] tag when issue #54637 is fixed.
|
||||
SIGDescribe("[DisabledForLargeClusters] ReplicationController light", func() {
|
||||
It("Should scale from 1 pod to 2 pods", func() {
|
||||
scaleTest := &HPAScaleTest{
|
||||
initPods: 1,
|
||||
totalInitialCPUUsage: 150,
|
||||
perPodCPURequest: 200,
|
||||
targetCPUUtilizationPercent: 50,
|
||||
minPods: 1,
|
||||
maxPods: 2,
|
||||
firstScale: 2,
|
||||
}
|
||||
scaleTest.run("rc-light", common.KindRC, rc, f)
|
||||
})
|
||||
It("Should scale from 2 pods to 1 pod", func() {
|
||||
scaleTest := &HPAScaleTest{
|
||||
initPods: 2,
|
||||
totalInitialCPUUsage: 50,
|
||||
perPodCPURequest: 200,
|
||||
targetCPUUtilizationPercent: 50,
|
||||
minPods: 1,
|
||||
maxPods: 2,
|
||||
firstScale: 1,
|
||||
}
|
||||
scaleTest.run("rc-light", common.KindRC, rc, f)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// HPAScaleTest struct is used by the scale(...) function.
|
||||
type HPAScaleTest struct {
|
||||
initPods int32
|
||||
totalInitialCPUUsage int32
|
||||
perPodCPURequest int64
|
||||
targetCPUUtilizationPercent int32
|
||||
minPods int32
|
||||
maxPods int32
|
||||
firstScale int32
|
||||
firstScaleStasis time.Duration
|
||||
cpuBurst int
|
||||
secondScale int32
|
||||
secondScaleStasis time.Duration
|
||||
}
|
||||
|
||||
// run is a method which runs an HPA lifecycle, from a starting state to an expected final state.
|
||||
// The initial state is defined by the initPods parameter.
|
||||
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
|
||||
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
|
||||
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
|
||||
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
|
||||
const timeToWait = 15 * time.Minute
|
||||
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
|
||||
defer rc.CleanUp()
|
||||
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
|
||||
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
|
||||
rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
|
||||
if scaleTest.firstScaleStasis > 0 {
|
||||
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
|
||||
}
|
||||
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
|
||||
rc.ConsumeCPU(scaleTest.cpuBurst)
|
||||
rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
|
||||
}
|
||||
}
|
||||
|
||||
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
|
||||
stasis := 0 * time.Minute
|
||||
if checkStability {
|
||||
stasis = 10 * time.Minute
|
||||
}
|
||||
scaleTest := &HPAScaleTest{
|
||||
initPods: 1,
|
||||
totalInitialCPUUsage: 250,
|
||||
perPodCPURequest: 500,
|
||||
targetCPUUtilizationPercent: 20,
|
||||
minPods: 1,
|
||||
maxPods: 5,
|
||||
firstScale: 3,
|
||||
firstScaleStasis: stasis,
|
||||
cpuBurst: 700,
|
||||
secondScale: 5,
|
||||
}
|
||||
scaleTest.run(name, kind, rc, f)
|
||||
}
|
||||
|
||||
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
|
||||
stasis := 0 * time.Minute
|
||||
if checkStability {
|
||||
stasis = 10 * time.Minute
|
||||
}
|
||||
scaleTest := &HPAScaleTest{
|
||||
initPods: 5,
|
||||
totalInitialCPUUsage: 375,
|
||||
perPodCPURequest: 500,
|
||||
targetCPUUtilizationPercent: 30,
|
||||
minPods: 1,
|
||||
maxPods: 5,
|
||||
firstScale: 3,
|
||||
firstScaleStasis: stasis,
|
||||
cpuBurst: 10,
|
||||
secondScale: 1,
|
||||
}
|
||||
scaleTest.run(name, kind, rc, f)
|
||||
}
|
35
vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/BUILD
generated
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["chaosmonkey.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/chaosmonkey",
|
||||
deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["chaosmonkey_test.go"],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/chaosmonkey",
|
||||
library = ":go_default_library",
|
||||
tags = ["e2e"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
156
vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/chaosmonkey.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package chaosmonkey
|
||||
|
||||
import . "github.com/onsi/ginkgo"
|
||||
|
||||
// Disruption is the type to construct a chaosmonkey with; see Do for more information.
|
||||
type Disruption func()
|
||||
|
||||
// Test is the type to register with a chaosmonkey. A test will run asynchronously across the
|
||||
// chaosmonkey's Disruption. A Test takes a Semaphore as an argument. It should call sem.Ready()
|
||||
// once it's ready for the disruption to start and should then wait until sem.StopCh (which is a
|
||||
// <-chan struct{}) is closed, which signals that the disruption is over. It should then clean up
|
||||
// and return. See Do and Semaphore for more information.
|
||||
type Test func(sem *Semaphore)
|
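// Illustrative sketch (not part of the vendored source): a Test that follows this
// contract typically signals readiness, blocks on StopCh, then cleans up. setUp
// and tearDown below are hypothetical helpers, assumed only for the example.
//
//	func exampleTest(sem *Semaphore) {
//		setUp()       // prepare whatever the test needs before the disruption
//		sem.Ready()   // tell the chaosmonkey this test is ready for the disruption
//		<-sem.StopCh  // block until the disruption is over
//		tearDown()    // validate and clean up after the disruption
//	}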
||||
|
||||
// Interface can be implemented if you prefer to define tests without dealing with a Semaphore. You
|
||||
// may define a struct that implements Interface's three methods (Setup, Test, and Teardown) and
|
||||
// RegisterInterface. See RegisterInterface for more information.
|
||||
type Interface interface {
|
||||
Setup()
|
||||
Test(stopCh <-chan struct{})
|
||||
Teardown()
|
||||
}
|
||||
|
||||
type chaosmonkey struct {
|
||||
disruption Disruption
|
||||
tests []Test
|
||||
}
|
||||
|
||||
// New creates and returns a chaosmonkey, with which the caller should register Tests and call Do.
|
||||
// See Do for more information.
|
||||
func New(disruption Disruption) *chaosmonkey {
|
||||
return &chaosmonkey{
|
||||
disruption,
|
||||
[]Test{},
|
||||
}
|
||||
}
|
||||
|
||||
// Register registers the given Test with the chaosmonkey, so that the test will run over the
|
||||
// Disruption.
|
||||
func (cm *chaosmonkey) Register(test Test) {
|
||||
cm.tests = append(cm.tests, test)
|
||||
}
|
||||
|
||||
// RegisterInterface registers the given Interface with the chaosmonkey, so the chaosmonkey will
|
||||
// call Setup, Test, and Teardown properly. Test can tell that the Disruption is finished when
|
||||
// stopCh is closed.
|
||||
func (cm *chaosmonkey) RegisterInterface(in Interface) {
|
||||
cm.Register(func(sem *Semaphore) {
|
||||
in.Setup()
|
||||
sem.Ready()
|
||||
in.Test(sem.StopCh)
|
||||
in.Teardown()
|
||||
})
|
||||
}
|
||||
|
||||
// Do performs the Disruption while testing the registered Tests. Once the caller has registered
|
||||
// all Tests with the chaosmonkey, they call Do. Do starts each registered test asynchronously and
|
||||
// waits for each test to signal that it is ready by calling sem.Ready(). Do will then do the
|
||||
// Disruption, and when it's complete, close sem.StopCh to signal to the registered Tests that the
|
||||
// Disruption is over, and wait for all Tests to return.
|
||||
func (cm *chaosmonkey) Do() {
|
||||
sems := []*Semaphore{}
|
||||
// All semaphores have the same StopCh.
|
||||
stopCh := make(chan struct{})
|
||||
|
||||
for _, test := range cm.tests {
|
||||
test := test
|
||||
sem := newSemaphore(stopCh)
|
||||
sems = append(sems, sem)
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
defer sem.done()
|
||||
test(sem)
|
||||
}()
|
||||
}
|
||||
|
||||
By("Waiting for all async tests to be ready")
|
||||
for _, sem := range sems {
|
||||
// Wait for test to be ready. We have to wait for ready *or done* because a test
|
||||
// may panic before signaling that it's ready, and we shouldn't block. Since we
|
||||
// deferred sem.done() above, if a test panics, it's marked as done.
|
||||
sem.waitForReadyOrDone()
|
||||
}
|
||||
|
||||
defer func() {
|
||||
close(stopCh)
|
||||
By("Waiting for async validations to complete")
|
||||
for _, sem := range sems {
|
||||
sem.waitForDone()
|
||||
}
|
||||
}()
|
||||
|
||||
By("Starting disruption")
|
||||
cm.disruption()
|
||||
By("Disruption complete; stopping async validations")
|
||||
}
|
||||
|
||||
// Semaphore is taken by a Test and provides: Ready(), for the Test to call when it's ready for the
|
||||
// disruption to start; and StopCh, the closure of which signals to the Test that the disruption is
|
||||
// finished.
|
||||
type Semaphore struct {
|
||||
readyCh chan struct{}
|
||||
StopCh <-chan struct{}
|
||||
doneCh chan struct{}
|
||||
}
|
||||
|
||||
func newSemaphore(stopCh <-chan struct{}) *Semaphore {
|
||||
// We don't want to block on Ready() or done()
|
||||
return &Semaphore{
|
||||
make(chan struct{}, 1),
|
||||
stopCh,
|
||||
make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
// Ready is called by the Test to signal that the Test is ready for the disruption to start.
|
||||
func (sem *Semaphore) Ready() {
|
||||
close(sem.readyCh)
|
||||
}
|
||||
|
||||
// done is an internal method for Go to defer, both to wait for all tests to return and to
|
||||
// sense if a test panicked before calling Ready. See waitForReadyOrDone.
|
||||
func (sem *Semaphore) done() {
|
||||
close(sem.doneCh)
|
||||
}
|
||||
|
||||
// We would like to just check if all tests are ready, but if they fail (which Ginkgo implements as
|
||||
// a panic), they may not have called Ready(). We check done as well to see if the function has
|
||||
// already returned; if it has, we don't care if it's ready, and just continue.
|
||||
func (sem *Semaphore) waitForReadyOrDone() {
|
||||
select {
|
||||
case <-sem.readyCh:
|
||||
case <-sem.doneCh:
|
||||
}
|
||||
}
|
||||
|
||||
// waitForDone is an internal method for Go to wait on all Tests returning.
|
||||
func (sem *Semaphore) waitForDone() {
|
||||
<-sem.doneCh
|
||||
}
|
53
vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/chaosmonkey_test.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package chaosmonkey
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDoWithPanic(t *testing.T) {
|
||||
var counter int64 = 0
|
||||
cm := New(func() {})
|
||||
tests := []Test{
|
||||
// No panic
|
||||
func(sem *Semaphore) {
|
||||
defer atomic.AddInt64(&counter, 1)
|
||||
sem.Ready()
|
||||
},
|
||||
// Panic after sem.Ready()
|
||||
func(sem *Semaphore) {
|
||||
defer atomic.AddInt64(&counter, 1)
|
||||
sem.Ready()
|
||||
panic("Panic after calling sem.Ready()")
|
||||
},
|
||||
// Panic before sem.Ready()
|
||||
func(sem *Semaphore) {
|
||||
defer atomic.AddInt64(&counter, 1)
|
||||
panic("Panic before calling sem.Ready()")
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
cm.Register(test)
|
||||
}
|
||||
cm.Do()
|
||||
// Check that all funcs in tests were called.
|
||||
if int(counter) != len(tests) {
|
||||
t.Errorf("Expected counter to be %v, but it was %v", len(tests), counter)
|
||||
}
|
||||
}
|
83
vendor/k8s.io/kubernetes/test/e2e/common/BUILD
generated
vendored
Normal file
@ -0,0 +1,83 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"apparmor.go",
|
||||
"autoscaling_utils.go",
|
||||
"configmap.go",
|
||||
"configmap_volume.go",
|
||||
"container_probe.go",
|
||||
"docker_containers.go",
|
||||
"downward_api.go",
|
||||
"downwardapi_volume.go",
|
||||
"empty_dir.go",
|
||||
"events.go",
|
||||
"expansion.go",
|
||||
"host_path.go",
|
||||
"init_container.go",
|
||||
"kubelet_etc_hosts.go",
|
||||
"networking.go",
|
||||
"pods.go",
|
||||
"privileged.go",
|
||||
"projected.go",
|
||||
"secrets.go",
|
||||
"secrets_volume.go",
|
||||
"sysctl.go",
|
||||
"util.go",
|
||||
"volumes.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/common",
|
||||
deps = [
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/client/conditions:go_default_library",
|
||||
"//pkg/kubelet:go_default_library",
|
||||
"//pkg/kubelet/sysctl:go_default_library",
|
||||
"//pkg/security/apparmor:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/golang.org/x/net/websocket:go_default_library",
|
||||
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
249
vendor/k8s.io/kubernetes/test/e2e/common/apparmor.go
generated
vendored
Normal file
@ -0,0 +1,249 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
api "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
appArmorProfilePrefix = "e2e-apparmor-test-"
|
||||
appArmorAllowedPath = "/expect_allowed_write"
|
||||
appArmorDeniedPath = "/expect_permission_denied"
|
||||
|
||||
loaderLabelKey = "name"
|
||||
loaderLabelValue = "e2e-apparmor-loader"
|
||||
)
|
||||
|
||||
// AppArmorDistros are distros with AppArmor support
|
||||
var AppArmorDistros = []string{"gci", "ubuntu"}
|
||||
|
||||
func IsAppArmorSupported() bool {
|
||||
return framework.NodeOSDistroIs(AppArmorDistros...)
|
||||
}
|
||||
|
||||
func SkipIfAppArmorNotSupported() {
|
||||
framework.SkipUnlessNodeOSDistroIs(AppArmorDistros...)
|
||||
}
|
||||
|
||||
func LoadAppArmorProfiles(f *framework.Framework) {
|
||||
createAppArmorProfileCM(f)
|
||||
createAppArmorProfileLoader(f)
|
||||
}
|
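// Illustrative sketch (not part of the vendored source): a typical e2e test would
// combine these helpers roughly as below; everything other than the helpers shown
// here is an assumption for the example.
//
//	SkipIfAppArmorNotSupported()                  // skip on distros without AppArmor
//	LoadAppArmorProfiles(f)                       // load the test profile via the loader RC
//	pod := CreateAppArmorTestPod(f, false, true)  // enforce the profile, run once
//	_ = pod                                       // assertions on the pod would follow here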
||||
|
||||
// CreateAppArmorTestPod creates a pod that tests apparmor profile enforcement. The pod exits with
|
||||
// an error code if the profile is incorrectly enforced. If runOnce is true the pod will exit after
|
||||
// a single test, otherwise it will repeat the test every 1 second until failure.
|
||||
func CreateAppArmorTestPod(f *framework.Framework, unconfined bool, runOnce bool) *api.Pod {
|
||||
profile := "localhost/" + appArmorProfilePrefix + f.Namespace.Name
|
||||
testCmd := fmt.Sprintf(`
|
||||
if touch %[1]s; then
|
||||
echo "FAILURE: write to %[1]s should be denied"
|
||||
exit 1
|
||||
elif ! touch %[2]s; then
|
||||
echo "FAILURE: write to %[2]s should be allowed"
|
||||
exit 2
|
||||
elif [[ $(< /proc/self/attr/current) != "%[3]s" ]]; then
|
||||
echo "FAILURE: not running with expected profile %[3]s"
|
||||
echo "found: $(cat /proc/self/attr/current)"
|
||||
exit 3
|
||||
fi`, appArmorDeniedPath, appArmorAllowedPath, appArmorProfilePrefix+f.Namespace.Name)
|
||||
|
||||
if unconfined {
|
||||
profile = apparmor.ProfileNameUnconfined
|
||||
testCmd = `
|
||||
if cat /proc/sysrq-trigger 2>&1 | grep 'Permission denied'; then
|
||||
echo 'FAILURE: reading /proc/sysrq-trigger should be allowed'
|
||||
exit 1
|
||||
elif [[ $(< /proc/self/attr/current) != "unconfined" ]]; then
|
||||
echo 'FAILURE: not running with expected profile unconfined'
|
||||
exit 2
|
||||
fi`
|
||||
}
|
||||
|
||||
if !runOnce {
|
||||
testCmd = fmt.Sprintf(`while true; do
|
||||
%s
|
||||
sleep 1
|
||||
done`, testCmd)
|
||||
}
|
||||
|
||||
loaderAffinity := &api.Affinity{
|
||||
PodAffinity: &api.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: []api.PodAffinityTerm{{
|
||||
Namespaces: []string{f.Namespace.Name},
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{loaderLabelKey: loaderLabelValue},
|
||||
},
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "test-apparmor-",
|
||||
Annotations: map[string]string{
|
||||
apparmor.ContainerAnnotationKeyPrefix + "test": profile,
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"test": "apparmor",
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Affinity: loaderAffinity,
|
||||
Containers: []api.Container{{
|
||||
Name: "test",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", testCmd},
|
||||
}},
|
||||
RestartPolicy: api.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
if runOnce {
|
||||
pod = f.PodClient().Create(pod)
|
||||
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(
|
||||
f.ClientSet, pod.Name, f.Namespace.Name))
|
||||
var err error
|
||||
pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
} else {
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
framework.ExpectNoError(f.WaitForPodReady(pod.Name))
|
||||
}
|
||||
|
||||
// Verify Pod affinity colocated the Pods.
|
||||
loader := getRunningLoaderPod(f)
|
||||
Expect(pod.Spec.NodeName).To(Equal(loader.Spec.NodeName))
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func createAppArmorProfileCM(f *framework.Framework) {
|
||||
profileName := appArmorProfilePrefix + f.Namespace.Name
|
||||
profile := fmt.Sprintf(`#include <tunables/global>
|
||||
profile %s flags=(attach_disconnected) {
|
||||
#include <abstractions/base>
|
||||
|
||||
file,
|
||||
|
||||
deny %s w,
|
||||
audit %s w,
|
||||
}
|
||||
`, profileName, appArmorDeniedPath, appArmorAllowedPath)
|
||||
|
||||
cm := &api.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "apparmor-profiles",
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
profileName: profile,
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
|
||||
framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap")
|
||||
}
|
||||
|
||||
func createAppArmorProfileLoader(f *framework.Framework) {
|
||||
True := true
|
||||
One := int32(1)
|
||||
loader := &api.ReplicationController{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "apparmor-loader",
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: api.ReplicationControllerSpec{
|
||||
Replicas: &One,
|
||||
Template: &api.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{loaderLabelKey: loaderLabelValue},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{
|
||||
Name: "apparmor-loader",
|
||||
Image: "gcr.io/google_containers/apparmor-loader:0.1",
|
||||
Args: []string{"-poll", "10s", "/profiles"},
|
||||
SecurityContext: &api.SecurityContext{
|
||||
Privileged: &True,
|
||||
},
|
||||
VolumeMounts: []api.VolumeMount{{
|
||||
Name: "sys",
|
||||
MountPath: "/sys",
|
||||
ReadOnly: true,
|
||||
}, {
|
||||
Name: "apparmor-includes",
|
||||
MountPath: "/etc/apparmor.d",
|
||||
ReadOnly: true,
|
||||
}, {
|
||||
Name: "profiles",
|
||||
MountPath: "/profiles",
|
||||
ReadOnly: true,
|
||||
}},
|
||||
}},
|
||||
Volumes: []api.Volume{{
|
||||
Name: "sys",
|
||||
VolumeSource: api.VolumeSource{
|
||||
HostPath: &api.HostPathVolumeSource{
|
||||
Path: "/sys",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "apparmor-includes",
|
||||
VolumeSource: api.VolumeSource{
|
||||
HostPath: &api.HostPathVolumeSource{
|
||||
Path: "/etc/apparmor.d",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "profiles",
|
||||
VolumeSource: api.VolumeSource{
|
||||
ConfigMap: &api.ConfigMapVolumeSource{
|
||||
LocalObjectReference: api.LocalObjectReference{
|
||||
Name: "apparmor-profiles",
|
||||
},
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(loader)
|
||||
framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController")
|
||||
|
||||
// Wait for loader to be ready.
|
||||
getRunningLoaderPod(f)
|
||||
}
|
||||
|
||||
func getRunningLoaderPod(f *framework.Framework) *api.Pod {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{loaderLabelKey: loaderLabelValue}))
|
||||
pods, err := framework.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, label)
|
||||
framework.ExpectNoError(err, "Failed to schedule apparmor-loader Pod")
|
||||
pod := &pods.Items[0]
|
||||
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod), "Failed to run apparmor-loader Pod")
|
||||
return pod
|
||||
}
|
525
vendor/k8s.io/kubernetes/test/e2e/common/autoscaling_utils.go
generated
vendored
Normal file
@ -0,0 +1,525 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
autoscalingv1 "k8s.io/api/autoscaling/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
dynamicConsumptionTimeInSeconds = 30
|
||||
staticConsumptionTimeInSeconds = 3600
|
||||
dynamicRequestSizeInMillicores = 20
|
||||
dynamicRequestSizeInMegabytes = 100
|
||||
dynamicRequestSizeCustomMetric = 10
|
||||
port = 80
|
||||
targetPort = 8080
|
||||
timeoutRC = 120 * time.Second
|
||||
startServiceTimeout = time.Minute
|
||||
startServiceInterval = 5 * time.Second
|
||||
rcIsNil = "ERROR: replicationController = nil"
|
||||
deploymentIsNil = "ERROR: deployment = nil"
|
||||
rsIsNil = "ERROR: replicaset = nil"
|
||||
invalidKind = "ERROR: invalid workload kind for resource consumer"
|
||||
customMetricName = "QPS"
|
||||
serviceInitializationTimeout = 2 * time.Minute
|
||||
serviceInitializationInterval = 15 * time.Second
|
||||
)
|
||||
|
||||
var (
|
||||
resourceConsumerImage = imageutils.GetE2EImage(imageutils.ResourceConsumer)
|
||||
resourceConsumerControllerImage = imageutils.GetE2EImage(imageutils.ResourceController)
|
||||
)
|
||||
|
||||
var (
|
||||
KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
|
||||
KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
|
||||
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
|
||||
subresource = "scale"
|
||||
)
|
||||
|
||||
/*
|
||||
ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
|
||||
typical use case:
|
||||
rc.ConsumeCPU(600)
|
||||
// ... check your assumption here
|
||||
rc.ConsumeCPU(300)
|
||||
// ... check your assumption here
|
||||
*/
|
||||
type ResourceConsumer struct {
|
||||
name string
|
||||
controllerName string
|
||||
kind schema.GroupVersionKind
|
||||
nsName string
|
||||
clientSet clientset.Interface
|
||||
internalClientset *internalclientset.Clientset
|
||||
cpu chan int
|
||||
mem chan int
|
||||
customMetric chan int
|
||||
stopCPU chan int
|
||||
stopMem chan int
|
||||
stopCustomMetric chan int
|
||||
stopWaitGroup sync.WaitGroup
|
||||
consumptionTimeInSeconds int
|
||||
sleepTime time.Duration
|
||||
requestSizeInMillicores int
|
||||
requestSizeInMegabytes int
|
||||
requestSizeCustomMetric int
|
||||
}
|
||||
|
||||
func GetResourceConsumerImage() string {
|
||||
return resourceConsumerImage
|
||||
}
|
||||
|
||||
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
|
||||
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
|
||||
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset)
|
||||
}
|
||||
|
||||
// TODO this still defaults to replication controller
|
||||
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
|
||||
return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
|
||||
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset)
|
||||
}
|
||||
|
||||
/*
|
||||
NewResourceConsumer creates new ResourceConsumer
|
||||
initCPUTotal argument is in millicores
|
||||
initMemoryTotal argument is in megabytes
|
||||
memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
|
||||
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
|
||||
*/
|
||||
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
|
||||
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
|
||||
|
||||
runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
|
||||
rc := &ResourceConsumer{
|
||||
name: name,
|
||||
controllerName: name + "-ctrl",
|
||||
kind: kind,
|
||||
nsName: nsName,
|
||||
clientSet: clientset,
|
||||
internalClientset: internalClientset,
|
||||
cpu: make(chan int),
|
||||
mem: make(chan int),
|
||||
customMetric: make(chan int),
|
||||
stopCPU: make(chan int),
|
||||
stopMem: make(chan int),
|
||||
stopCustomMetric: make(chan int),
|
||||
consumptionTimeInSeconds: consumptionTimeInSeconds,
|
||||
sleepTime: time.Duration(consumptionTimeInSeconds) * time.Second,
|
||||
requestSizeInMillicores: requestSizeInMillicores,
|
||||
requestSizeInMegabytes: requestSizeInMegabytes,
|
||||
requestSizeCustomMetric: requestSizeCustomMetric,
|
||||
}
|
||||
|
||||
go rc.makeConsumeCPURequests()
|
||||
rc.ConsumeCPU(initCPUTotal)
|
||||
|
||||
go rc.makeConsumeMemRequests()
|
||||
rc.ConsumeMem(initMemoryTotal)
|
||||
go rc.makeConsumeCustomMetric()
|
||||
rc.ConsumeCustomMetric(initCustomMetric)
|
||||
return rc
|
||||
}
|
||||
|
||||
// ConsumeCPU consumes given number of CPU
|
||||
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
|
||||
framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
|
||||
rc.cpu <- millicores
|
||||
}
|
||||
|
||||
// ConsumeMem consumes given number of Mem
|
||||
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
|
||||
framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
|
||||
rc.mem <- megabytes
|
||||
}
|
||||
|
||||
// ConsumeCustomMetric bumps the custom metric by the given amount
|
||||
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
|
||||
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
|
||||
rc.customMetric <- amount
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) makeConsumeCPURequests() {
|
||||
defer GinkgoRecover()
|
||||
rc.stopWaitGroup.Add(1)
|
||||
defer rc.stopWaitGroup.Done()
|
||||
sleepTime := time.Duration(0)
|
||||
millicores := 0
|
||||
for {
|
||||
select {
|
||||
case millicores = <-rc.cpu:
|
||||
framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
|
||||
case <-time.After(sleepTime):
|
||||
framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
|
||||
rc.sendConsumeCPURequest(millicores)
|
||||
sleepTime = rc.sleepTime
|
||||
case <-rc.stopCPU:
|
||||
framework.Logf("RC %s: stopping CPU consumer", rc.name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) makeConsumeMemRequests() {
|
||||
defer GinkgoRecover()
|
||||
rc.stopWaitGroup.Add(1)
|
||||
defer rc.stopWaitGroup.Done()
|
||||
sleepTime := time.Duration(0)
|
||||
megabytes := 0
|
||||
for {
|
||||
select {
|
||||
case megabytes = <-rc.mem:
|
||||
framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
|
||||
case <-time.After(sleepTime):
|
||||
framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
|
||||
rc.sendConsumeMemRequest(megabytes)
|
||||
sleepTime = rc.sleepTime
|
||||
case <-rc.stopMem:
|
||||
framework.Logf("RC %s: stopping mem consumer", rc.name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) makeConsumeCustomMetric() {
|
||||
defer GinkgoRecover()
|
||||
rc.stopWaitGroup.Add(1)
|
||||
defer rc.stopWaitGroup.Done()
|
||||
sleepTime := time.Duration(0)
|
||||
delta := 0
|
||||
for {
|
||||
select {
|
||||
case delta := <-rc.customMetric:
|
||||
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
|
||||
case <-time.After(sleepTime):
|
||||
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
|
||||
rc.sendConsumeCustomMetric(delta)
|
||||
sleepTime = rc.sleepTime
|
||||
case <-rc.stopCustomMetric:
|
||||
framework.Logf("RC %s: stopping metric consumer", rc.name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
|
||||
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
|
||||
framework.ExpectNoError(err)
|
||||
req := proxyRequest.Namespace(rc.nsName).
|
||||
Context(ctx).
|
||||
Name(rc.controllerName).
|
||||
Suffix("ConsumeCPU").
|
||||
Param("millicores", strconv.Itoa(millicores)).
|
||||
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
|
||||
Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
|
||||
framework.Logf("ConsumeCPU URL: %v", *req.URL())
|
||||
_, err = req.DoRaw()
|
||||
if err != nil {
|
||||
framework.Logf("ConsumeCPU failure: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// sendConsumeMemRequest sends POST request for memory consumption
|
||||
func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
|
||||
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
|
||||
framework.ExpectNoError(err)
|
||||
req := proxyRequest.Namespace(rc.nsName).
|
||||
Context(ctx).
|
||||
Name(rc.controllerName).
|
||||
Suffix("ConsumeMem").
|
||||
Param("megabytes", strconv.Itoa(megabytes)).
|
||||
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
|
||||
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
|
||||
framework.Logf("ConsumeMem URL: %v", *req.URL())
|
||||
_, err = req.DoRaw()
|
||||
if err != nil {
|
||||
framework.Logf("ConsumeMem failure: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
// sendConsumeCustomMetric sends POST request for custom metric consumption
|
||||
func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
|
||||
proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.CoreV1().RESTClient().Post())
|
||||
framework.ExpectNoError(err)
|
||||
req := proxyRequest.Namespace(rc.nsName).
|
||||
Context(ctx).
|
||||
Name(rc.controllerName).
|
||||
Suffix("BumpMetric").
|
||||
Param("metric", customMetricName).
|
||||
Param("delta", strconv.Itoa(delta)).
|
||||
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
|
||||
Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
|
||||
framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
|
||||
_, err = req.DoRaw()
|
||||
if err != nil {
|
||||
framework.Logf("ConsumeCustomMetric failure: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) GetReplicas() int {
|
||||
switch rc.kind {
|
||||
case KindRC:
|
||||
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if replicationController == nil {
|
||||
framework.Failf(rcIsNil)
|
||||
}
|
||||
return int(replicationController.Status.ReadyReplicas)
|
||||
case KindDeployment:
|
||||
deployment, err := rc.clientSet.ExtensionsV1beta1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if deployment == nil {
|
||||
framework.Failf(deploymentIsNil)
|
||||
}
|
||||
return int(deployment.Status.ReadyReplicas)
|
||||
case KindReplicaSet:
|
||||
rs, err := rc.clientSet.ExtensionsV1beta1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if rs == nil {
|
||||
framework.Failf(rsIsNil)
|
||||
}
|
||||
return int(rs.Status.ReadyReplicas)
|
||||
default:
|
||||
framework.Failf(invalidKind)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
|
||||
interval := 20 * time.Second
|
||||
err := wait.PollImmediate(interval, duration, func() (bool, error) {
|
||||
replicas := rc.GetReplicas()
|
||||
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
|
||||
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
|
||||
})
|
||||
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
|
||||
interval := 10 * time.Second
|
||||
err := wait.PollImmediate(interval, duration, func() (bool, error) {
|
||||
replicas := rc.GetReplicas()
|
||||
framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
|
||||
if replicas != desiredReplicas {
|
||||
return false, fmt.Errorf("number of replicas changed unexpectedly")
|
||||
} else {
|
||||
return false, nil // Expected number of replicas found. Continue polling until timeout.
|
||||
}
|
||||
})
|
||||
// The call above always returns an error, but if it is timeout, it's OK (condition satisfied all the time).
|
||||
if err == wait.ErrWaitTimeout {
|
||||
framework.Logf("Number of replicas was stable over %v", duration)
|
||||
return
|
||||
}
|
||||
framework.ExpectNoErrorWithOffset(1, err)
|
||||
}
|
||||
|
||||
// Pause stops background goroutines responsible for consuming resources.
|
||||
func (rc *ResourceConsumer) Pause() {
|
||||
By(fmt.Sprintf("HPA pausing RC %s", rc.name))
|
||||
rc.stopCPU <- 0
|
||||
rc.stopMem <- 0
|
||||
rc.stopCustomMetric <- 0
|
||||
rc.stopWaitGroup.Wait()
|
||||
}
|
||||
|
||||
// Resume restarts the background goroutines responsible for consuming resources.
|
||||
func (rc *ResourceConsumer) Resume() {
|
||||
By(fmt.Sprintf("HPA resuming RC %s", rc.name))
|
||||
go rc.makeConsumeCPURequests()
|
||||
go rc.makeConsumeMemRequests()
|
||||
go rc.makeConsumeCustomMetric()
|
||||
}
|
||||
|
||||
func (rc *ResourceConsumer) CleanUp() {
|
||||
By(fmt.Sprintf("Removing consuming RC %s", rc.name))
|
||||
close(rc.stopCPU)
|
||||
close(rc.stopMem)
|
||||
close(rc.stopCustomMetric)
|
||||
rc.stopWaitGroup.Wait()
|
||||
// Wait some time to ensure all child goroutines are finished.
|
||||
time.Sleep(10 * time.Second)
|
||||
kind := rc.kind.GroupKind()
|
||||
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
|
||||
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
|
||||
framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
|
||||
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
|
||||
}
|
||||
|
||||
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64) {
|
||||
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
|
||||
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: port,
|
||||
TargetPort: intstr.FromInt(targetPort),
|
||||
}},
|
||||
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
rcConfig := testutils.RCConfig{
|
||||
Client: c,
|
||||
InternalClient: internalClient,
|
||||
Image: resourceConsumerImage,
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
Timeout: timeoutRC,
|
||||
Replicas: replicas,
|
||||
CpuRequest: cpuLimitMillis,
|
||||
CpuLimit: cpuLimitMillis,
|
||||
MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
|
||||
MemLimit: memLimitMb * 1024 * 1024,
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case KindRC:
|
||||
framework.ExpectNoError(framework.RunRC(rcConfig))
|
||||
break
|
||||
case KindDeployment:
|
||||
dpConfig := testutils.DeploymentConfig{
|
||||
RCConfig: rcConfig,
|
||||
}
|
||||
framework.ExpectNoError(framework.RunDeployment(dpConfig))
|
||||
break
|
||||
case KindReplicaSet:
|
||||
rsConfig := testutils.ReplicaSetConfig{
|
||||
RCConfig: rcConfig,
|
||||
}
|
||||
By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
|
||||
framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
|
||||
break
|
||||
default:
|
||||
framework.Failf(invalidKind)
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Running controller"))
|
||||
controllerName := name + "-ctrl"
|
||||
_, err = c.CoreV1().Services(ns).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: controllerName,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: port,
|
||||
TargetPort: intstr.FromInt(targetPort),
|
||||
}},
|
||||
|
||||
Selector: map[string]string{
|
||||
"name": controllerName,
|
||||
},
|
||||
},
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
dnsClusterFirst := v1.DNSClusterFirst
|
||||
controllerRcConfig := testutils.RCConfig{
|
||||
Client: c,
|
||||
Image: resourceConsumerControllerImage,
|
||||
Name: controllerName,
|
||||
Namespace: ns,
|
||||
Timeout: timeoutRC,
|
||||
Replicas: 1,
|
||||
Command: []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
|
||||
DNSPolicy: &dnsClusterFirst,
|
||||
}
|
||||
framework.ExpectNoError(framework.RunRC(controllerRcConfig))
|
||||
|
||||
// Wait for endpoints to propagate for the controller service.
|
||||
framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
|
||||
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
|
||||
}
|
||||
|
||||
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
|
||||
hpa := &autoscalingv1.HorizontalPodAutoscaler{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: rc.name,
|
||||
Namespace: rc.nsName,
|
||||
},
|
||||
Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
|
||||
ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
|
||||
APIVersion: rc.kind.GroupVersion().String(),
|
||||
Kind: rc.kind.Kind,
|
||||
Name: rc.name,
|
||||
},
|
||||
MinReplicas: &minReplicas,
|
||||
MaxReplicas: maxRepl,
|
||||
TargetCPUUtilizationPercentage: &cpu,
|
||||
},
|
||||
}
|
||||
hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
|
||||
framework.ExpectNoError(errHPA)
|
||||
return hpa
|
||||
}
|
||||
|
||||
func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
|
||||
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
|
||||
}
|
138
vendor/k8s.io/kubernetes/test/e2e/common/configmap.go
generated
vendored
Normal file
@ -0,0 +1,138 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-api-machinery] ConfigMap", func() {
|
||||
f := framework.NewDefaultFramework("configmap")
|
||||
|
||||
/*
|
||||
Testname: configmap-in-env-field
|
||||
Description: Make sure config map value can be used as an environment
|
||||
variable in the container (on container.env field)
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable via environment variable ", func() {
|
||||
name := "configmap-test-" + string(uuid.NewUUID())
|
||||
configMap := newConfigMap(f, name)
|
||||
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "env-test",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "CONFIG_DATA_1",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
Key: "data-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, []string{
|
||||
"CONFIG_DATA_1=value-1",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-envfrom-field
|
||||
Description: Make sure config map values can be used as a source for
|
||||
environment variables in the container (on container.envFrom field)
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable via the environment ", func() {
|
||||
name := "configmap-test-" + string(uuid.NewUUID())
|
||||
configMap := newEnvFromConfigMap(f, name)
|
||||
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "env-test",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{
|
||||
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
|
||||
},
|
||||
{
|
||||
Prefix: "p_",
|
||||
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, []string{
|
||||
"data_1=value-1", "data_2=value-2", "data_3=value-3",
|
||||
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
|
||||
return &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data_1": "value-1",
|
||||
"data_2": "value-2",
|
||||
"data_3": "value-3",
|
||||
},
|
||||
}
|
||||
}
|
629
vendor/k8s.io/kubernetes/test/e2e/common/configmap_volume.go
generated
vendored
Normal file
@ -0,0 +1,629 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-storage] ConfigMap", func() {
|
||||
f := framework.NewDefaultFramework("configmap")
|
||||
|
||||
/*
|
||||
Testname: configmap-nomap-simple
|
||||
Description: Make sure config map without mappings works by mounting it
|
||||
to a volume on the pod with no other settings.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume ", func() {
|
||||
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-nomap-default-mode
|
||||
Description: Make sure config map without mappings works by mounting it
|
||||
to a volume on the pod with defaultMode set
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() {
|
||||
defaultMode := int32(0400)
|
||||
doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Feature:FSGroup]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
doConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-nomap-user
|
||||
Description: Make sure config map without mappings works by mounting it
|
||||
to a volume on the pod as non-root.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root ", func() {
|
||||
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup]", func() {
|
||||
doConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-simple-mapped
|
||||
Description: Make sure config map works by mounting it to a volume with
|
||||
a custom path (mapping) on the pod with no other settings and make sure
|
||||
the pod actually consumes it.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() {
|
||||
doConfigMapE2EWithMappings(f, 0, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-with-item-mode-mapped
|
||||
Description: Make sure config map works with an item mode (e.g. 0400)
|
||||
for the config map item.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set", func() {
|
||||
mode := int32(0400)
|
||||
doConfigMapE2EWithMappings(f, 0, 0, &mode)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-simple-user-mapped
|
||||
Description: Make sure config map works when it is mounted as non-root.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root ", func() {
|
||||
doConfigMapE2EWithMappings(f, 1000, 0, nil)
|
||||
})
|
||||
|
||||
It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup]", func() {
|
||||
doConfigMapE2EWithMappings(f, 1000, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-update-test
|
||||
Description: Make sure the update operation works on a config map and
|
||||
the result is observed in volumes mounted in containers.
|
||||
*/
|
||||
framework.ConformanceIt("updates should be reflected in volume ", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
name := "configmap-test-upd-" + string(uuid.NewUUID())
|
||||
volumeName := "configmap-volume"
|
||||
volumeMountPath := "/etc/configmap-volume"
|
||||
containerName := "configmap-volume-test"
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
}
|
||||
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
|
||||
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
|
||||
configMap.ResourceVersion = "" // to force update
|
||||
configMap.Data["data-1"] = "value-2"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-CUD-test
|
||||
Description: Make sure Create, Update, Delete operations are all working
|
||||
on a config map and the result is observed in volumes mounted in containers.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume ", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
volumeMountPath := "/etc/configmap-volumes"
|
||||
|
||||
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
|
||||
deleteContainerName := "delcm-volume-test"
|
||||
deleteVolumeName := "deletecm-volume"
|
||||
deleteConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: deleteName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
|
||||
updateContainerName := "updcm-volume-test"
|
||||
updateVolumeName := "updatecm-volume"
|
||||
updateConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: updateName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
|
||||
createContainerName := "createcm-volume-test"
|
||||
createVolumeName := "createcm-volume"
|
||||
createConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: createName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
|
||||
var err error
|
||||
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
|
||||
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: deleteName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: updateName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: createName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: deleteContainerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "delete"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateContainerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "update"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createContainerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: createVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "create"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
|
||||
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
|
||||
updateConfigMap.ResourceVersion = "" // to force update
|
||||
delete(updateConfigMap.Data, "data-1")
|
||||
updateConfigMap.Data["data-3"] = "value-3"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
|
||||
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
|
||||
}
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
|
||||
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
|
||||
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: configmap-multiple-volumes
|
||||
Description: Make sure a config map works when it is mounted as two different
|
||||
volumes in the same pod.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod ", func() {
|
||||
var (
|
||||
name = "configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
volumeMountPath = "/etc/configmap-volume"
|
||||
volumeName2 = "configmap-volume-2"
|
||||
volumeMountPath2 = "/etc/configmap-volume-2"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "configmap-volume-test",
|
||||
Image: mountImage,
|
||||
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
MountPath: volumeMountPath2,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, []string{
|
||||
"content of file \"/etc/configmap-volume/data-1\": value-1",
|
||||
})
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
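// newConfigMap returns a ConfigMap in the test namespace with three sample data keys.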
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
|
||||
return &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
"data-2": "value-2",
|
||||
"data-3": "value-3",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
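// doConfigMapE2EWithoutMappings mounts a ConfigMap volume without item mappings and
// verifies the file content and mode, optionally as a non-root user and/or with an fsGroup.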
func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
|
||||
userID := int64(uid)
|
||||
groupID := int64(fsGroup)
|
||||
|
||||
var (
|
||||
name = "configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
volumeMountPath = "/etc/configmap-volume"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
one := int64(1)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "configmap-volume-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
"--file_content=/etc/configmap-volume/data-1",
|
||||
"--file_mode=/etc/configmap-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &one,
|
||||
},
|
||||
}
|
||||
|
||||
if userID != 0 {
|
||||
pod.Spec.SecurityContext.RunAsUser = &userID
|
||||
}
|
||||
|
||||
if groupID != 0 {
|
||||
pod.Spec.SecurityContext.FSGroup = &groupID
|
||||
}
|
||||
|
||||
if defaultMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode = defaultMode
|
||||
} else {
|
||||
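// DefaultMode was not set, so the files are created with the kubelet default
// mode for ConfigMap volumes (0644); use that value for the expected output.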
mode := int32(0644)
|
||||
defaultMode = &mode
|
||||
}
|
||||
|
||||
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
|
||||
output := []string{
|
||||
"content of file \"/etc/configmap-volume/data-1\": value-1",
|
||||
"mode of file \"/etc/configmap-volume/data-1\": " + modeString,
|
||||
}
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, output)
|
||||
}
|
||||
|
||||
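// doConfigMapE2EWithMappings mounts a single ConfigMap key at a custom path and
// verifies the file content and, when no fsGroup is set, the file mode.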
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
|
||||
userID := int64(uid)
|
||||
groupID := int64(fsGroup)
|
||||
|
||||
var (
|
||||
name = "configmap-test-volume-map-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
volumeMountPath = "/etc/configmap-volume"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
one := int64(1)
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "data-2",
|
||||
Path: "path/to/data-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "configmap-volume-test",
|
||||
Image: mountImage,
|
||||
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
|
||||
"--file_mode=/etc/configmap-volume/path/to/data-2"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &one,
|
||||
},
|
||||
}
|
||||
|
||||
if userID != 0 {
|
||||
pod.Spec.SecurityContext.RunAsUser = &userID
|
||||
}
|
||||
|
||||
if groupID != 0 {
|
||||
pod.Spec.SecurityContext.FSGroup = &groupID
|
||||
}
|
||||
|
||||
if itemMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
|
||||
} else {
|
||||
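// No item mode was requested, so the file gets the default mode (0644);
// use that value for the expected output.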
mode := int32(0644)
|
||||
itemMode = &mode
|
||||
}
|
||||
|
||||
// Just check file mode if fsGroup is not set. If fsGroup is set, the
|
||||
// final mode is adjusted and we are not testing that case.
|
||||
output := []string{
|
||||
"content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
|
||||
}
|
||||
if fsGroup == 0 {
|
||||
modeString := fmt.Sprintf("%v", os.FileMode(*itemMode))
|
||||
output = append(output, "mode of file \"/etc/configmap-volume/path/to/data-2\": "+modeString)
|
||||
}
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, output)
|
||||
}
|
448
vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go
generated
vendored
Normal file
@ -0,0 +1,448 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
probTestContainerName = "test-webserver"
|
||||
probTestInitialDelaySeconds = 15
|
||||
|
||||
defaultObservationTimeout = time.Minute * 2
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Probing container", func() {
|
||||
f := framework.NewDefaultFramework("container-probe")
|
||||
var podClient *framework.PodClient
|
||||
probe := webserverProbeBuilder{}
|
||||
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-readiness-probe-initial-delay
|
||||
Description: Make sure that a pod with a readiness probe is not
|
||||
ready before the initial delay elapses and never restarts.
|
||||
*/
|
||||
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart ", func() {
|
||||
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
|
||||
f.WaitForPodReady(p.Name)
|
||||
|
||||
p, err := podClient.Get(p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
isReady, err := testutils.PodRunningReady(p)
|
||||
framework.ExpectNoError(err)
|
||||
Expect(isReady).To(BeTrue(), "pod should be ready")
|
||||
|
||||
// We assume the pod became ready when the container became ready. This
|
||||
// is true for a single container pod.
|
||||
readyTime, err := getTransitionTimeForReadyCondition(p)
|
||||
framework.ExpectNoError(err)
|
||||
startedTime, err := getContainerStartedTime(p, probTestContainerName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
|
||||
initialDelay := probTestInitialDelaySeconds * time.Second
|
||||
if readyTime.Sub(startedTime) < initialDelay {
|
||||
framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
|
||||
}
|
||||
|
||||
restartCount := getRestartCount(p)
|
||||
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-readiness-probe-failure
|
||||
Description: Make sure that a pod with a failing readiness probe is
|
||||
never ready and never restarts.
|
||||
*/
|
||||
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart ", func() {
|
||||
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
|
||||
Consistently(func() (bool, error) {
|
||||
p, err := podClient.Get(p.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return podutil.IsPodReady(p), nil
|
||||
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
|
||||
|
||||
p, err := podClient.Get(p.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
isReady, err := testutils.PodRunningReady(p)
|
||||
Expect(isReady).NotTo(BeTrue(), "pod should not be ready")
|
||||
|
||||
restartCount := getRestartCount(p)
|
||||
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-cat-liveness-probe-restarted
|
||||
Description: Make sure the pod is restarted with a cat /tmp/health
|
||||
liveness probe.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"cat", "/tmp/health"},
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-cat-liveness-probe-not-restarted
|
||||
Description: Make sure the pod is not restarted with a cat /tmp/health
|
||||
liveness probe.
|
||||
*/
|
||||
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"cat", "/tmp/health"},
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 0, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-http-liveness-probe-restarted
|
||||
Description: Make sure that when an http liveness probe fails, the pod
|
||||
is restarted.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a /healthz http liveness probe ", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.Liveness),
|
||||
Command: []string{"/server"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/healthz",
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
// Slow by design (5 min)
|
||||
/*
|
||||
Testname: pods-restart-count
|
||||
Description: Make sure when a pod gets restarted, its restart count
|
||||
should increase.
|
||||
*/
|
||||
framework.ConformanceIt("should have monotonically increasing restart count [Slow]", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.Liveness),
|
||||
Command: []string{"/server"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/healthz",
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 5,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 5, time.Minute*5)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-http-liveness-probe-not-restarted
|
||||
Description: Make sure that when an http liveness probe succeeds, the pod
|
||||
is not restarted.
|
||||
*/
|
||||
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe ", func() {
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-http",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 80}},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Path: "/",
|
||||
Port: intstr.FromInt(80),
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
TimeoutSeconds: 5,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 0, defaultObservationTimeout)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-docker-liveness-probe-timeout
|
||||
Description: Make sure that the pod is restarted with a docker exec
|
||||
liveness probe with timeout.
|
||||
*/
|
||||
framework.ConformanceIt("should be restarted with a docker exec liveness probe with timeout ", func() {
|
||||
// TODO: enable this test once the default exec handler supports timeout.
|
||||
framework.Skipf("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
|
||||
runLivenessTest(f, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "liveness-exec",
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "liveness",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "sleep 600"},
|
||||
LivenessProbe: &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: []string{"/bin/sh", "-c", "sleep 10"},
|
||||
},
|
||||
},
|
||||
InitialDelaySeconds: 15,
|
||||
TimeoutSeconds: 1,
|
||||
FailureThreshold: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, 1, defaultObservationTimeout)
|
||||
})
|
||||
})
|
||||
|
||||
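// getContainerStartedTime returns the time at which the named container entered the running state.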
func getContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error) {
|
||||
for _, status := range p.Status.ContainerStatuses {
|
||||
if status.Name != containerName {
|
||||
continue
|
||||
}
|
||||
if status.State.Running == nil {
|
||||
return time.Time{}, fmt.Errorf("Container is not running")
|
||||
}
|
||||
return status.State.Running.StartedAt.Time, nil
|
||||
}
|
||||
return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
|
||||
}
|
||||
|
||||
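// getTransitionTimeForReadyCondition returns the last transition time of the pod's Ready condition.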
func getTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
|
||||
for _, cond := range p.Status.Conditions {
|
||||
if cond.Type == v1.PodReady {
|
||||
return cond.LastTransitionTime.Time, nil
|
||||
}
|
||||
}
|
||||
return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
|
||||
}
|
||||
|
||||
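// getRestartCount returns the total restart count summed over all containers in the pod.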
func getRestartCount(p *v1.Pod) int {
|
||||
count := 0
|
||||
for _, containerStatus := range p.Status.ContainerStatuses {
|
||||
count += int(containerStatus.RestartCount)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
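// makePodSpec returns a single-container test-webserver pod using the given readiness and liveness probes.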
func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: probTestContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
LivenessProbe: livenessProbe,
|
||||
ReadinessProbe: readinessProbe,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
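// webserverProbeBuilder builds HTTP GET probes against the test webserver; it can
// add an initial delay or point the probe at an unserved port to make it fail.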
type webserverProbeBuilder struct {
|
||||
failing bool
|
||||
initialDelay bool
|
||||
}
|
||||
|
||||
func (b webserverProbeBuilder) withFailing() webserverProbeBuilder {
|
||||
b.failing = true
|
||||
return b
|
||||
}
|
||||
|
||||
func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {
|
||||
b.initialDelay = true
|
||||
return b
|
||||
}
|
||||
|
||||
func (b webserverProbeBuilder) build() *v1.Probe {
|
||||
probe := &v1.Probe{
|
||||
Handler: v1.Handler{
|
||||
HTTPGet: &v1.HTTPGetAction{
|
||||
Port: intstr.FromInt(80),
|
||||
Path: "/",
|
||||
},
|
||||
},
|
||||
}
|
||||
if b.initialDelay {
|
||||
probe.InitialDelaySeconds = probTestInitialDelaySeconds
|
||||
}
|
||||
if b.failing {
|
||||
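// Port 81 is not served by the test webserver, so the probe will fail.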
probe.HTTPGet.Port = intstr.FromInt(81)
|
||||
}
|
||||
return probe
|
||||
}
|
||||
|
||||
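// runLivenessTest creates the pod, waits for it to leave the Pending phase, and then
// watches its restart count until it reaches expectNumRestarts or the timeout expires.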
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
|
||||
podClient := f.PodClient()
|
||||
ns := f.Namespace.Name
|
||||
Expect(pod.Spec.Containers).NotTo(BeEmpty())
|
||||
containerName := pod.Spec.Containers[0].Name
|
||||
// At the end of the test, clean up by removing the pod.
|
||||
defer func() {
|
||||
By("deleting the pod")
|
||||
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
|
||||
}()
|
||||
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
|
||||
podClient.Create(pod)
|
||||
|
||||
// Wait until the pod is not pending. (Here we need to check for something other than
|
||||
// 'Pending' other than checking for 'Running', since when failures occur, we go to
|
||||
// 'Terminated' which can cause indefinite blocking.)
|
||||
framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
|
||||
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
|
||||
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
|
||||
|
||||
// Check the pod's current state and verify that restartCount is present.
|
||||
By("checking the pod's current state and verifying that restartCount is present")
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
|
||||
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
|
||||
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
|
||||
|
||||
// Wait for the restart state to be as desired.
|
||||
deadline := time.Now().Add(timeout)
|
||||
lastRestartCount := initialRestartCount
|
||||
observedRestarts := int32(0)
|
||||
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
|
||||
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
|
||||
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
|
||||
if restartCount != lastRestartCount {
|
||||
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
|
||||
ns, pod.Name, restartCount, time.Since(start))
|
||||
if restartCount < lastRestartCount {
|
||||
framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
|
||||
ns, pod.Name, lastRestartCount, restartCount)
|
||||
}
|
||||
}
|
||||
observedRestarts = restartCount - initialRestartCount
|
||||
if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
|
||||
// Stop if we have observed more than expectNumRestarts restarts.
|
||||
break
|
||||
}
|
||||
lastRestartCount = restartCount
|
||||
}
|
||||
|
||||
// If we expected 0 restarts, fail if observed any restart.
|
||||
// If we expected n restarts (n > 0), fail if we observed < n restarts.
|
||||
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
|
||||
int(observedRestarts) < expectNumRestarts) {
|
||||
framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
|
||||
ns, pod.Name, expectNumRestarts, observedRestarts)
|
||||
}
|
||||
}
|
113
vendor/k8s.io/kubernetes/test/e2e/common/docker_containers.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Docker Containers", func() {
|
||||
f := framework.NewDefaultFramework("containers")
|
||||
|
||||
/*
|
||||
Testname: container-without-command-args
|
||||
Description: When a Pod is created and neither 'command' nor 'args' are
|
||||
provided for a Container, ensure that the docker image's default
|
||||
command and args are used.
|
||||
*/
|
||||
framework.ConformanceIt("should use the image defaults if command and args are blank ", func() {
|
||||
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
|
||||
"[/ep default arguments]",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: container-with-args
|
||||
Description: When a Pod is created and 'args' are provided for a
|
||||
Container, ensure that they take precedence over the docker image's
|
||||
default arguments, but that the default command is used.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) ", func() {
|
||||
pod := entrypointTestPod()
|
||||
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
|
||||
|
||||
f.TestContainerOutput("override arguments", pod, 0, []string{
|
||||
"[/ep override arguments]",
|
||||
})
|
||||
})
|
||||
|
||||
// Note: when you override the entrypoint, the image's arguments (docker cmd)
|
||||
// are ignored.
|
||||
/*
|
||||
Testname: container-with-command
|
||||
Description: When a Pod is created and 'command' is provided for a
|
||||
Container, ensure that it takes precedence over the docker image's default
|
||||
command.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to override the image's default commmand (docker entrypoint) ", func() {
|
||||
pod := entrypointTestPod()
|
||||
pod.Spec.Containers[0].Command = []string{"/ep-2"}
|
||||
|
||||
f.TestContainerOutput("override command", pod, 0, []string{
|
||||
"[/ep-2]",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: container-with-command-args
|
||||
Description: When a Pod is created and 'command' and 'args' are
|
||||
provided for a Container, ensure that they take precedence over the docker
|
||||
image's default command and arguments.
|
||||
*/
|
||||
framework.ConformanceIt("should be able to override the image's default command and arguments ", func() {
|
||||
pod := entrypointTestPod()
|
||||
pod.Spec.Containers[0].Command = []string{"/ep-2"}
|
||||
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
|
||||
|
||||
f.TestContainerOutput("override all", pod, 0, []string{
|
||||
"[/ep-2 override arguments]",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
const testContainerName = "test-container"
|
||||
|
||||
// Return a prototypical entrypoint test pod
|
||||
func entrypointTestPod() *v1.Pod {
|
||||
podName := "client-containers-" + string(uuid.NewUUID())
|
||||
|
||||
one := int64(1)
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: testContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.EntrypointTester),
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &one,
|
||||
},
|
||||
}
|
||||
}
|
382
vendor/k8s.io/kubernetes/test/e2e/common/downward_api.go
generated
vendored
Normal file
@ -0,0 +1,382 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
utilversion "k8s.io/kubernetes/pkg/util/version"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var (
|
||||
hostIPVersion = utilversion.MustParseSemantic("v1.8.0")
|
||||
podUIDVersion = utilversion.MustParseSemantic("v1.8.0")
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-api-machinery] Downward API", func() {
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-name-namespace-podip
|
||||
Description: Ensure that downward API can provide pod's name, namespace
|
||||
and IP address as environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars ", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_NAME",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POD_NAMESPACE",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POD_IP",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "status.podIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectations := []string{
|
||||
fmt.Sprintf("POD_NAME=%v", podName),
|
||||
fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
|
||||
"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
|
||||
}
|
||||
|
||||
testDownwardAPI(f, podName, env, expectations)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-host-ip
|
||||
Description: Ensure that downward API can provide an IP address for
|
||||
host node as an environment variable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide host IP as an env var ", func() {
|
||||
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "HOST_IP",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "status.hostIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectations := []string{
|
||||
"HOST_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
|
||||
}
|
||||
|
||||
testDownwardAPI(f, podName, env, expectations)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-limits-requests
|
||||
Description: Ensure that downward API can provide CPU/memory limit
|
||||
and CPU/memory request as environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars ", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "CPU_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "MEMORY_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "CPU_REQUEST",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "MEMORY_REQUEST",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectations := []string{
|
||||
"CPU_LIMIT=2",
|
||||
"MEMORY_LIMIT=67108864",
|
||||
"CPU_REQUEST=1",
|
||||
"MEMORY_REQUEST=33554432",
|
||||
}
|
||||
|
||||
testDownwardAPI(f, podName, env, expectations)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-default-allocatable
|
||||
Description: Ensure that downward API can provide default node
|
||||
allocatable values for CPU and memory as environment variables if CPU
|
||||
and memory limits are not specified for a container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable ", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "CPU_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "MEMORY_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectations := []string{
|
||||
"CPU_LIMIT=[1-9]",
|
||||
"MEMORY_LIMIT=[1-9]",
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: env,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
testDownwardAPIUsingPod(f, pod, env, expectations)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-env-pod-uid
|
||||
Description: Ensure that downward API can provide pod UID as an
|
||||
environment variable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide pod UID as env vars ", func() {
|
||||
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "POD_UID",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.uid",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
expectations := []string{
|
||||
"POD_UID=[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}",
|
||||
}
|
||||
|
||||
testDownwardAPI(f, podName, env, expectations)
|
||||
})
|
||||
})
|
||||
|
||||
var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive]", func() {
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
|
||||
Context("Downward API tests for local ephemeral storage", func() {
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessLocalEphemeralStorageEnabled()
|
||||
})
|
||||
|
||||
It("should provide container's limits.ephemeral-storage and requests.ephemeral-storage as env vars", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "EPHEMERAL_STORAGE_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.ephemeral-storage",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "EPHEMERAL_STORAGE_REQUEST",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "requests.ephemeral-storage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectations := []string{
|
||||
fmt.Sprintf("EPHEMERAL_STORAGE_LIMIT=%d", 64*1024*1024),
|
||||
fmt.Sprintf("EPHEMERAL_STORAGE_REQUEST=%d", 32*1024*1024),
|
||||
}
|
||||
|
||||
testDownwardAPIForEphemeralStorage(f, podName, env, expectations)
|
||||
})
|
||||
|
||||
It("should provide default limits.ephemeral-storage from node allocatable", func() {
|
||||
podName := "downward-api-" + string(uuid.NewUUID())
|
||||
env := []v1.EnvVar{
|
||||
{
|
||||
Name: "EPHEMERAL_STORAGE_LIMIT",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
Resource: "limits.ephemeral-storage",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
expectations := []string{
|
||||
"EPHEMERAL_STORAGE_LIMIT=[1-9]",
|
||||
}
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: env,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
testDownwardAPIUsingPod(f, pod, env, expectations)
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
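// testDownwardAPI wraps the given env vars in a pod with fixed CPU and memory
// requests and limits and verifies the container output.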
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("250m"),
|
||||
v1.ResourceMemory: resource.MustParse("32Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1250m"),
|
||||
v1.ResourceMemory: resource.MustParse("64Mi"),
|
||||
},
|
||||
},
|
||||
Env: env,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
testDownwardAPIUsingPod(f, pod, env, expectations)
|
||||
}
|
||||
|
||||
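// testDownwardAPIForEphemeralStorage is like testDownwardAPI but sets
// ephemeral-storage requests and limits instead of CPU and memory.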
func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceEphemeralStorage: resource.MustParse("32Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceEphemeralStorage: resource.MustParse("64Mi"),
|
||||
},
|
||||
},
|
||||
Env: env,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
testDownwardAPIUsingPod(f, pod, env, expectations)
|
||||
}
|
||||
|
||||
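// testDownwardAPIUsingPod creates the pod and checks its output against the expected regular expressions.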
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
|
||||
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
|
||||
}
|
474
vendor/k8s.io/kubernetes/test/e2e/common/downwardapi_volume.go
generated
vendored
Normal file
@ -0,0 +1,474 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-storage] Downward API volume", func() {
|
||||
// How long to wait for pod logs to be available
|
||||
const podLogTimeout = 2 * time.Minute
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-podname
|
||||
Description: Ensure that downward API can provide pod's name through
|
||||
DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide podname only ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s\n", podName),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-set-default-mode
|
||||
Description: Ensure that downward API can set the default file permission
|
||||
mode for DownwardAPIVolumeFiles if no mode is specified.
|
||||
*/
|
||||
framework.ConformanceIt("should set DefaultMode on files ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
defaultMode := int32(0400)
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podname\": -r--------",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-set-mode
|
||||
Description: Ensure that downward API can set the file permission mode for
|
||||
DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should set mode on item file ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
mode := int32(0400)
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podname\": -r--------",
|
||||
})
|
||||
})
|
||||
|
||||
It("should provide podname as non-root with fsgroup [Feature:FSGroup]", func() {
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
uid := int64(1001)
|
||||
gid := int64(1234)
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
RunAsUser: &uid,
|
||||
FSGroup: &gid,
|
||||
}
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s\n", podName),
|
||||
})
|
||||
})
|
||||
|
||||
It("should provide podname as non-root with fsgroup and defaultMode [Feature:FSGroup]", func() {
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
uid := int64(1001)
|
||||
gid := int64(1234)
|
||||
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
RunAsUser: &uid,
|
||||
FSGroup: &gid,
|
||||
}
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podname\": -r--r-----",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-update-label
|
||||
Description: Ensure that downward API updates labels in
|
||||
DownwardAPIVolumeFiles when pod's labels get modified.
|
||||
*/
|
||||
framework.ConformanceIt("should update labels on modification ", func() {
|
||||
labels := map[string]string{}
|
||||
labels["key1"] = "value1"
|
||||
labels["key2"] = "value2"
|
||||
|
||||
podName := "labelsupdate" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
|
||||
containerName := "client-container"
|
||||
By("Creating the pod")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
|
||||
|
||||
//modify labels
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Labels["key3"] = "value3"
|
||||
})
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-update-annotation
|
||||
Description: Ensure that downward API updates annotations in
|
||||
DownwardAPIVolumeFiles when pod's annotations get modified.
|
||||
*/
|
||||
framework.ConformanceIt("should update annotations on modification ", func() {
|
||||
annotations := map[string]string{}
|
||||
annotations["builder"] = "bar"
|
||||
podName := "annotationupdate" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
|
||||
|
||||
containerName := "client-container"
|
||||
By("Creating the pod")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
|
||||
|
||||
//modify annotations
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Annotations["builder"] = "foo"
|
||||
})
|
||||
|
||||
Eventually(func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-cpu-limit
|
||||
Description: Ensure that downward API can provide container's CPU limit
|
||||
through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu limit ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("2\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-memory-limit
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
limit through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory limit ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("67108864\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-cpu-request
|
||||
Description: Ensure that downward API can provide container's CPU
|
||||
request through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu request ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("1\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-memory-request
|
||||
Description: Ensure that downward API can provide container's memory
|
||||
request through DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory request ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("33554432\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-default-cpu
|
||||
Description: Ensure that downward API can provide default node
|
||||
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
|
||||
limit is not specified for a container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
|
||||
|
||||
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: downwardapi-volume-default-memory
|
||||
Description: Ensure that downward API can provide default node
|
||||
allocatable value for memory through DownwardAPIVolumeFiles if memory
|
||||
limit is not specified for a container.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set ", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
|
||||
|
||||
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--file_mode=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if itemMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].Mode = itemMode
|
||||
}
|
||||
if defaultMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode = defaultMode
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--file_content=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
|
||||
return []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--file_content=" + filePath},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("250m"),
|
||||
v1.ResourceMemory: resource.MustParse("32Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1250m"),
|
||||
v1.ResourceMemory: resource.MustParse("64Mi"),
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
|
||||
return []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--file_content=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, labels, annotations)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations, pod)
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "podinfo",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
DownwardAPI: &v1.DownwardAPIVolumeSource{
|
||||
Items: []v1.DownwardAPIVolumeFile{
|
||||
{
|
||||
Path: "podname",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "cpu_limit",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "cpu_request",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "memory_limit",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "memory_request",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "requests.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
|
||||
if len(labels) > 0 {
|
||||
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
|
||||
Path: "labels",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.labels",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if len(annotations) > 0 {
|
||||
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
|
||||
Path: "annotations",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.annotations",
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add test-webserver example as pointed out in https://github.com/kubernetes/kubernetes/pull/5093#discussion-diff-37606771
|
465
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go
generated
vendored
Normal file
@ -0,0 +1,465 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
volumePath = "/test-volume"
|
||||
)
|
||||
|
||||
var (
|
||||
testImageRootUid = imageutils.GetE2EImage(imageutils.Mounttest)
|
||||
testImageNonRootUid = imageutils.GetE2EImage(imageutils.MounttestUser)
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-storage] EmptyDir volumes", func() {
|
||||
f := framework.NewDefaultFramework("emptydir")
|
||||
|
||||
Context("when FSGroup is specified [Feature:FSGroup]", func() {
|
||||
It("new files should be created with FSGroup ownership when container is root", func() {
|
||||
doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
It("new files should be created with FSGroup ownership when container is non-root", func() {
|
||||
doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() {
|
||||
doTestSubPathFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
|
||||
doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
It("volume on default medium should have the correct mode using FSGroup", func() {
|
||||
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
It("volume on tmpfs should have the correct mode using FSGroup", func() {
|
||||
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-mode-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
|
||||
mount type.
|
||||
*/
|
||||
framework.ConformanceIt("volume on tmpfs should have the correct mode", func() {
|
||||
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0644-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a root owned file with 0644 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,tmpfs)", func() {
|
||||
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0666-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a root owned file with 0666 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,tmpfs)", func() {
|
||||
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0777-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a root owned file with 0777 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,tmpfs)", func() {
|
||||
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0644-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a user owned file with 0644 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,tmpfs)", func() {
|
||||
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0666-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a user owned file with 0666 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,tmpfs)", func() {
|
||||
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0777-tmpfs
|
||||
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
|
||||
of 'Memory', ensure a user owned file with 0777 unix file permissions
|
||||
is created correctly, has tmpfs mount type, and enforces the permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,tmpfs)", func() {
|
||||
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-mode
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure the
|
||||
volume has 0777 unix file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("volume on default medium should have the correct mode", func() {
|
||||
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0644
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
root owned file with 0644 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,default)", func() {
|
||||
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0666
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
root owned file with 0666 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,default)", func() {
|
||||
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-root-0777
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
root owned file with 0777 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,default)", func() {
|
||||
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0644
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
user owned file with 0644 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,default)", func() {
|
||||
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0666
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
user owned file with 0666 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,default)", func() {
|
||||
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-emptydir-user-0777
|
||||
Description: For a Pod created with an 'emptyDir' Volume, ensure a
|
||||
user owned file with 0777 unix file permissions is created and enforced
|
||||
correctly.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,default)", func() {
|
||||
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
|
||||
})
|
||||
})
|
||||
|
||||
const (
|
||||
containerName = "test-container"
|
||||
volumeName = "test-volume"
|
||||
)
|
||||
|
||||
func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(testImageRootUid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0660=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
fmt.Sprintf("--file_owner=%v", filePath),
|
||||
}
|
||||
|
||||
fsGroup := int64(123)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-rw----",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
"owner GID of \"/test-volume/test-file\": 123",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTestSubPathFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
subPath = "test-sub"
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(image, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
fmt.Sprintf("--file_owner=%v", volumePath),
|
||||
}
|
||||
|
||||
pod.Spec.Containers[0].VolumeMounts[0].SubPath = subPath
|
||||
|
||||
fsGroup := int64(123)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir subpath on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
"owner UID of \"/test-volume\": 0",
|
||||
"owner GID of \"/test-volume\": 123",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(testImageRootUid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
}
|
||||
|
||||
fsGroup := int64(1001)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(image, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0644=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
fsGroup := int64(123)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-r--r--",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(testImageRootUid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(image, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0644=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-r--r--",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(image, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0666=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0666 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-rw-rw-",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(image, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0777=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0777 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rwxrwxrwx",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func formatMedium(medium v1.StorageMedium) string {
|
||||
if medium == v1.StorageMediumMemory {
|
||||
return "tmpfs"
|
||||
}
|
||||
|
||||
return "node default medium"
|
||||
}
|
||||
|
||||
func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
|
||||
podName := "pod-" + string(uuid.NewUUID())
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: image,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
SELinuxOptions: &v1.SELinuxOptions{
|
||||
Level: "s0",
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: source,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
152
vendor/k8s.io/kubernetes/test/e2e/common/events.go
generated
vendored
Normal file
@ -0,0 +1,152 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
type Action func() error
|
||||
|
||||
// Returns true if a node update matching the predicate was emitted from the
|
||||
// system after performing the supplied action.
|
||||
func ObserveNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodePredicate func(*v1.Node) bool, action Action) (bool, error) {
|
||||
observedMatchingNode := false
|
||||
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
|
||||
informerStartedChan := make(chan struct{})
|
||||
var informerStartedGuard sync.Once
|
||||
|
||||
_, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
ls, err := f.ClientSet.CoreV1().Nodes().List(options)
|
||||
return ls, err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
// Signal parent goroutine that watching has begun.
|
||||
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
|
||||
options.FieldSelector = nodeSelector.String()
|
||||
w, err := f.ClientSet.CoreV1().Nodes().Watch(options)
|
||||
return w, err
|
||||
},
|
||||
},
|
||||
&v1.Node{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(oldObj, newObj interface{}) {
|
||||
n, ok := newObj.(*v1.Node)
|
||||
Expect(ok).To(Equal(true))
|
||||
if nodePredicate(n) {
|
||||
observedMatchingNode = true
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
// Start the informer and block this goroutine waiting for the started signal.
|
||||
informerStopChan := make(chan struct{})
|
||||
defer func() { close(informerStopChan) }()
|
||||
go controller.Run(informerStopChan)
|
||||
<-informerStartedChan
|
||||
|
||||
// Invoke the action function.
|
||||
err := action()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Poll whether the informer has found a matching node update with a timeout.
|
||||
	// Wait up to 2 minutes, polling every second.
|
||||
timeout := 2 * time.Minute
|
||||
interval := 1 * time.Second
|
||||
err = wait.Poll(interval, timeout, func() (bool, error) {
|
||||
return observedMatchingNode, nil
|
||||
})
|
||||
return err == nil, err
|
||||
}
|
||||
|
||||
// Returns true if an event matching the predicate was emitted from the system
|
||||
// after performing the supplied action.
|
||||
func ObserveEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Event) bool, action Action) (bool, error) {
|
||||
observedMatchingEvent := false
|
||||
informerStartedChan := make(chan struct{})
|
||||
var informerStartedGuard sync.Once
|
||||
|
||||
// Create an informer to list/watch events from the test framework namespace.
|
||||
_, controller := cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
ls, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
|
||||
return ls, err
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
// Signal parent goroutine that watching has begun.
|
||||
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
|
||||
w, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).Watch(options)
|
||||
return w, err
|
||||
},
|
||||
},
|
||||
&v1.Event{},
|
||||
0,
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
e, ok := obj.(*v1.Event)
|
||||
By(fmt.Sprintf("Considering event: \nType = [%s], Name = [%s], Reason = [%s], Message = [%s]", e.Type, e.Name, e.Reason, e.Message))
|
||||
Expect(ok).To(Equal(true))
|
||||
if ok && eventPredicate(e) {
|
||||
observedMatchingEvent = true
|
||||
}
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
// Start the informer and block this goroutine waiting for the started signal.
|
||||
informerStopChan := make(chan struct{})
|
||||
defer func() { close(informerStopChan) }()
|
||||
go controller.Run(informerStopChan)
|
||||
<-informerStartedChan
|
||||
|
||||
// Invoke the action function.
|
||||
err := action()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Poll whether the informer has found a matching event with a timeout.
|
||||
	// Wait up to 2 minutes, polling every second.
|
||||
timeout := 2 * time.Minute
|
||||
interval := 1 * time.Second
|
||||
err = wait.Poll(interval, timeout, func() (bool, error) {
|
||||
return observedMatchingEvent, nil
|
||||
})
|
||||
return err == nil, err
|
||||
}
|
147
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// These tests exercise the Kubernetes expansion syntax $(VAR).
|
||||
// For more information, see:
|
||||
// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/expansion.md
|
||||
var _ = framework.KubeDescribe("Variable Expansion", func() {
|
||||
f := framework.NewDefaultFramework("var-expansion")
|
||||
|
||||
/*
|
||||
Testname: var-expansion-env
|
||||
Description: Make sure environment variables can be set using an
|
||||
	   expansion of previously defined environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should allow composing env vars into new env vars ", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "FOO",
|
||||
Value: "foo-value",
|
||||
},
|
||||
{
|
||||
Name: "BAR",
|
||||
Value: "bar-value",
|
||||
},
|
||||
{
|
||||
Name: "FOOBAR",
|
||||
Value: "$(FOO);;$(BAR)",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("env composition", pod, 0, []string{
|
||||
"FOO=foo-value",
|
||||
"BAR=bar-value",
|
||||
"FOOBAR=foo-value;;bar-value",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-command
|
||||
Description: Make sure a container's commands can be set using an
|
||||
expansion of environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should allow substituting values in a container's command ", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "TEST_VAR",
|
||||
Value: "test-value",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("substitution in container's command", pod, 0, []string{
|
||||
"test-value",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: var-expansion-arg
|
||||
Description: Make sure a container's args can be set using an
|
||||
expansion of environment variables.
|
||||
*/
|
||||
framework.ConformanceIt("should allow substituting values in a container's args ", func() {
|
||||
podName := "var-expansion-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "dapi-container",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c"},
|
||||
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "TEST_VAR",
|
||||
Value: "test-value",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("substitution in container's args", pod, 0, []string{
|
||||
"test-value",
|
||||
})
|
||||
})
|
||||
})
|
269
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
//TODO : Consolidate this code with the code for emptyDir.
|
||||
//This will require some smarts.
|
||||
var _ = Describe("[sig-storage] HostPath", func() {
|
||||
f := framework.NewDefaultFramework("hostpath")
|
||||
|
||||
BeforeEach(func() {
|
||||
// TODO permission denied cleanup failures
|
||||
//cleanup before running the test.
|
||||
_ = os.Remove("/tmp/test-file")
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: volume-hostpath-mode
|
||||
Description: For a Pod created with a 'HostPath' Volume, ensure the
|
||||
	   volume is a directory with 0777 unix file permissions and that it has
|
||||
the sticky bit (mode flag t) set.
|
||||
*/
|
||||
framework.ConformanceIt("should give a volume the correct mode", func() {
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_mode=%v", volumePath),
|
||||
}
|
||||
f.TestContainerOutput("hostPath mode", pod, 0, []string{
|
||||
"mode of file \"/test-volume\": dtrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
|
||||
})
|
||||
})
|
||||
|
||||
// This test requires mounting a folder into a container with write privileges.
|
||||
It("should support r/w", func() {
|
||||
filePath := path.Join(volumePath, "test-file")
|
||||
retryDuration := 180
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
fmt.Sprintf("--new_file_0644=%v", filePath),
|
||||
fmt.Sprintf("--file_mode=%v", filePath),
|
||||
}
|
||||
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePath),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
//Read the content of the file with the second container to
|
||||
//verify volumes being shared properly among containers within the pod.
|
||||
f.TestContainerOutput("hostPath r/w", pod, 1, []string{
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
|
||||
It("should support subPath", func() {
|
||||
subPath := "sub-path"
|
||||
fileName := "test-file"
|
||||
retryDuration := 180
|
||||
|
||||
filePathInWriter := path.Join(volumePath, fileName)
|
||||
filePathInReader := path.Join(volumePath, subPath, fileName)
|
||||
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
|
||||
// Write the file in the subPath from container 0
|
||||
container := &pod.Spec.Containers[0]
|
||||
container.VolumeMounts[0].SubPath = subPath
|
||||
container.Args = []string{
|
||||
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
|
||||
fmt.Sprintf("--file_mode=%v", filePathInWriter),
|
||||
}
|
||||
|
||||
// Read it from outside the subPath from container 1
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
|
||||
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
|
||||
It("should support existing directory subPath", func() {
|
||||
framework.SkipUnlessSSHKeyPresent()
|
||||
|
||||
subPath := "sub-path"
|
||||
fileName := "test-file"
|
||||
retryDuration := 180
|
||||
|
||||
filePathInWriter := path.Join(volumePath, fileName)
|
||||
filePathInReader := path.Join(volumePath, subPath, fileName)
|
||||
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
pod.Spec.NodeName = nodeList.Items[0].Name
|
||||
|
||||
// Create the subPath directory on the host
|
||||
existing := path.Join(source.Path, subPath)
|
||||
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
|
||||
framework.LogSSHResult(result)
|
||||
framework.ExpectNoError(err)
|
||||
if result.Code != 0 {
|
||||
framework.Failf("mkdir returned non-zero")
|
||||
}
|
||||
|
||||
// Write the file in the subPath from container 0
|
||||
container := &pod.Spec.Containers[0]
|
||||
container.VolumeMounts[0].SubPath = subPath
|
||||
container.Args = []string{
|
||||
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
|
||||
fmt.Sprintf("--file_mode=%v", filePathInWriter),
|
||||
}
|
||||
|
||||
// Read it from outside the subPath from container 1
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
|
||||
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
|
||||
// TODO consolidate common code of this test and above
|
||||
It("should support existing single file subPath", func() {
|
||||
framework.SkipUnlessSSHKeyPresent()
|
||||
|
||||
subPath := "sub-path-test-file"
|
||||
retryDuration := 180
|
||||
|
||||
filePathInReader := path.Join(volumePath, subPath)
|
||||
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source)
|
||||
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
pod.Spec.NodeName = nodeList.Items[0].Name
|
||||
|
||||
// Create the subPath file on the host
|
||||
existing := path.Join(source.Path, subPath)
|
||||
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
|
||||
framework.LogSSHResult(result)
|
||||
framework.ExpectNoError(err)
|
||||
if result.Code != 0 {
|
||||
framework.Failf("echo returned non-zero")
|
||||
}
|
||||
|
||||
// Mount the file to the subPath in container 0
|
||||
container := &pod.Spec.Containers[0]
|
||||
container.VolumeMounts[0].SubPath = subPath
|
||||
|
||||
// Read it from outside the subPath from container 1
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
|
||||
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
//These constants are borrowed from the other test.
|
||||
//const volumeName = "test-volume"
|
||||
const containerName1 = "test-container-1"
|
||||
const containerName2 = "test-container-2"
|
||||
|
||||
func mount(source *v1.HostPathVolumeSource) []v1.Volume {
|
||||
return []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: source,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
|
||||
func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
|
||||
podName := "pod-host-path-test"
|
||||
privileged := true
|
||||
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName1,
|
||||
Image: mountImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: containerName2,
|
||||
Image: mountImage,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: mount(source),
|
||||
},
|
||||
}
|
||||
}
|
380
vendor/k8s.io/kubernetes/test/e2e/common/init_container.go
generated
vendored
Normal file
@ -0,0 +1,380 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/client/conditions"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("InitContainer", func() {
|
||||
f := framework.NewDefaultFramework("init-container")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
It("should invoke init containers on a RestartNever pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
framework.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
|
||||
Expect(err).To(BeNil())
|
||||
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
|
||||
endPod := event.Object.(*v1.Pod)
|
||||
Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
|
||||
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
|
||||
Expect(init).NotTo(BeNil())
|
||||
Expect(init.Status).To(Equal(v1.ConditionTrue))
|
||||
|
||||
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
|
||||
for _, status := range endPod.Status.InitContainerStatuses {
|
||||
Expect(status.Ready).To(BeTrue())
|
||||
Expect(status.State.Terminated).NotTo(BeNil())
|
||||
Expect(status.State.Terminated.ExitCode).To(BeZero())
|
||||
}
|
||||
})
|
||||
|
||||
It("should invoke init containers on a RestartAlways pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
framework.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
|
||||
Expect(err).To(BeNil())
|
||||
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
|
||||
endPod := event.Object.(*v1.Pod)
|
||||
Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
|
||||
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
|
||||
Expect(init).NotTo(BeNil())
|
||||
Expect(init.Status).To(Equal(v1.ConditionTrue))
|
||||
|
||||
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
|
||||
for _, status := range endPod.Status.InitContainerStatuses {
|
||||
Expect(status.Ready).To(BeTrue())
|
||||
Expect(status.State.Terminated).NotTo(BeNil())
|
||||
Expect(status.State.Terminated.ExitCode).To(BeZero())
|
||||
}
|
||||
})
|
||||
|
||||
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
framework.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(
|
||||
framework.PodStartTimeout, wr,
|
||||
// check for the first container to fail at least once
|
||||
func(evt watch.Event) (bool, error) {
|
||||
switch t := evt.Object.(type) {
|
||||
case *v1.Pod:
|
||||
for _, status := range t.Status.ContainerStatuses {
|
||||
if status.State.Waiting == nil {
|
||||
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
|
||||
}
|
||||
if status.State.Waiting.Reason != "PodInitializing" {
|
||||
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
|
||||
}
|
||||
}
|
||||
if len(t.Status.InitContainerStatuses) != 2 {
|
||||
return false, nil
|
||||
}
|
||||
status := t.Status.InitContainerStatuses[1]
|
||||
if status.State.Waiting == nil {
|
||||
return false, fmt.Errorf("second init container should not be out of waiting: %#v", status)
|
||||
}
|
||||
if status.State.Waiting.Reason != "PodInitializing" {
|
||||
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
|
||||
}
|
||||
status = t.Status.InitContainerStatuses[0]
|
||||
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
|
||||
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
|
||||
}
|
||||
// continue until we see an attempt to restart the pod
|
||||
return status.LastTerminationState.Terminated != nil, nil
|
||||
default:
|
||||
return false, fmt.Errorf("unexpected object: %#v", t)
|
||||
}
|
||||
},
|
||||
// verify the first init container is restarted at least three times
|
||||
func(evt watch.Event) (bool, error) {
|
||||
switch t := evt.Object.(type) {
|
||||
case *v1.Pod:
|
||||
status := t.Status.InitContainerStatuses[0]
|
||||
if status.RestartCount < 3 {
|
||||
return false, nil
|
||||
}
|
||||
framework.Logf("init container has failed twice: %#v", t)
|
||||
// TODO: more conditions
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("unexpected object: %#v", t)
|
||||
}
|
||||
},
|
||||
)
|
||||
Expect(err).To(BeNil())
|
||||
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
|
||||
endPod := event.Object.(*v1.Pod)
|
||||
Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
|
||||
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
|
||||
Expect(init).NotTo(BeNil())
|
||||
Expect(init.Status).To(Equal(v1.ConditionFalse))
|
||||
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
|
||||
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
|
||||
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
|
||||
})
|
||||
|
||||
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
|
||||
framework.SkipIfContainerRuntimeIs("rkt") // #25988
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-init-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
InitContainers: []v1.Container{
|
||||
{
|
||||
Name: "init1",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
},
|
||||
{
|
||||
Name: "init2",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/false"},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "run1",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/true"},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
framework.Logf("PodSpec: initContainers in spec.initContainers")
|
||||
startedPod := podClient.Create(pod)
|
||||
|
||||
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
|
||||
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
|
||||
|
||||
wr := watch.NewRecorder(w)
|
||||
event, err := watch.Until(
|
||||
framework.PodStartTimeout, wr,
|
||||
// check for the second init container to fail at least once
|
||||
func(evt watch.Event) (bool, error) {
|
||||
switch t := evt.Object.(type) {
|
||||
case *v1.Pod:
|
||||
for _, status := range t.Status.ContainerStatuses {
|
||||
if status.State.Waiting == nil {
|
||||
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
|
||||
}
|
||||
if status.State.Waiting.Reason != "PodInitializing" {
|
||||
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
|
||||
}
|
||||
}
|
||||
if len(t.Status.InitContainerStatuses) != 2 {
|
||||
return false, nil
|
||||
}
|
||||
status := t.Status.InitContainerStatuses[0]
|
||||
if status.State.Terminated == nil {
|
||||
if status.State.Waiting != nil && status.State.Waiting.Reason != "PodInitializing" {
|
||||
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
if status.State.Terminated != nil && status.State.Terminated.ExitCode != 0 {
|
||||
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
|
||||
}
|
||||
status = t.Status.InitContainerStatuses[1]
|
||||
if status.State.Terminated == nil {
|
||||
return false, nil
|
||||
}
|
||||
if status.State.Terminated.ExitCode == 0 {
|
||||
return false, fmt.Errorf("second init container should have failed: %#v", status)
|
||||
}
|
||||
return true, nil
|
||||
default:
|
||||
return false, fmt.Errorf("unexpected object: %#v", t)
|
||||
}
|
||||
},
|
||||
conditions.PodCompleted,
|
||||
)
|
||||
Expect(err).To(BeNil())
|
||||
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
|
||||
endPod := event.Object.(*v1.Pod)
|
||||
|
||||
Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
|
||||
_, init := podutil.GetPodCondition(&endPod.Status, v1.PodInitialized)
|
||||
Expect(init).NotTo(BeNil())
|
||||
Expect(init.Status).To(Equal(v1.ConditionFalse))
|
||||
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
|
||||
Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
|
||||
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
|
||||
Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil())
|
||||
})
|
||||
})
|
229
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go
generated
vendored
Normal file
@ -0,0 +1,229 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
etcHostsPodName = "test-pod"
|
||||
etcHostsHostNetworkPodName = "test-host-network-pod"
|
||||
etcHostsPartialContent = "# Kubernetes-managed hosts file."
|
||||
)
|
||||
|
||||
var etcHostsImageName = imageutils.GetE2EImage(imageutils.Netexec)
|
||||
|
||||
type KubeletManagedHostConfig struct {
|
||||
hostNetworkPod *v1.Pod
|
||||
pod *v1.Pod
|
||||
f *framework.Framework
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
|
||||
f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts")
|
||||
config := &KubeletManagedHostConfig{
|
||||
f: f,
|
||||
}
|
||||
|
||||
/*
|
||||
Testname: kubelet-managed-etc-hosts
|
||||
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
|
||||
it into the container.
|
||||
*/
|
||||
framework.ConformanceIt("should test kubelet managed /etc/hosts file ", func() {
|
||||
By("Setting up the test")
|
||||
config.setup()
|
||||
|
||||
By("Running the test")
|
||||
config.verifyEtcHosts()
|
||||
})
|
||||
})
|
||||
|
||||
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
|
||||
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
|
||||
assertManagedStatus(config, etcHostsPodName, true, "busybox-1")
|
||||
assertManagedStatus(config, etcHostsPodName, true, "busybox-2")
|
||||
|
||||
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
|
||||
assertManagedStatus(config, etcHostsPodName, false, "busybox-3")
|
||||
|
||||
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
|
||||
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-1")
|
||||
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
|
||||
}
|
||||
|
||||
func (config *KubeletManagedHostConfig) setup() {
|
||||
By("Creating hostNetwork=false pod")
|
||||
config.createPodWithoutHostNetwork()
|
||||
|
||||
By("Creating hostNetwork=true pod")
|
||||
config.createPodWithHostNetwork()
|
||||
}
|
||||
|
||||
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
|
||||
podSpec := config.createPodSpec(etcHostsPodName)
|
||||
config.pod = config.f.PodClient().CreateSync(podSpec)
|
||||
}
|
||||
|
||||
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
|
||||
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
|
||||
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
|
||||
}
|
||||
|
||||
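// assertManagedStatus checks whether /etc/hosts inside the named container of the given pod
// is (or is not) kubelet-managed, retrying until the timeout below.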
func assertManagedStatus(
|
||||
config *KubeletManagedHostConfig, podName string, expectedIsManaged bool, name string) {
|
||||
// TODO: workaround for https://github.com/kubernetes/kubernetes/issues/34256
|
||||
//
|
||||
// Retry until timeout for the contents of /etc/hosts to show
|
||||
// up. Note: if /etc/hosts is properly mounted, then this will
|
||||
// succeed immediately.
|
||||
const retryTimeout = 30 * time.Second
|
||||
|
||||
retryCount := 0
|
||||
etcHostsContent := ""
|
||||
|
||||
for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
|
||||
etcHostsContent = config.getEtcHostsContent(podName, name)
|
||||
isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
|
||||
|
||||
if expectedIsManaged == isManaged {
|
||||
return
|
||||
}
|
||||
|
||||
glog.Warningf(
|
||||
"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
|
||||
podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)
|
||||
|
||||
retryCount++
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
if expectedIsManaged {
|
||||
framework.Failf(
|
||||
"/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
|
||||
name, retryCount, etcHostsContent)
|
||||
} else {
|
||||
framework.Failf(
|
||||
"/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
|
||||
name, retryCount, etcHostsContent)
|
||||
}
|
||||
}
|
||||
|
||||
func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string {
|
||||
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
|
||||
}
|
||||
|
||||
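// createPodSpec returns a three-container busybox pod; busybox-3 mounts the node's /etc/hosts
// directly, so the kubelet should not manage that container's hosts file.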
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
|
||||
hostPathType := new(v1.HostPathType)
|
||||
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "busybox-1",
|
||||
Image: etcHostsImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"sleep",
|
||||
"900",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "busybox-2",
|
||||
Image: etcHostsImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"sleep",
|
||||
"900",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "busybox-3",
|
||||
Image: etcHostsImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"sleep",
|
||||
"900",
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "host-etc-hosts",
|
||||
MountPath: "/etc/hosts",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "host-etc-hosts",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: &v1.HostPathVolumeSource{
|
||||
Path: "/etc/hosts",
|
||||
Type: hostPathType,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
||||
|
||||
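// createPodSpecWithHostNetwork returns a two-container pod running with hostNetwork=true,
// whose /etc/hosts is expected to remain unmanaged by the kubelet.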
func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
HostNetwork: true,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "busybox-1",
|
||||
Image: etcHostsImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"sleep",
|
||||
"900",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "busybox-2",
|
||||
Image: etcHostsImageName,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
Command: []string{
|
||||
"sleep",
|
||||
"900",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return pod
|
||||
}
|
81
vendor/k8s.io/kubernetes/test/e2e/common/networking.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-network] Networking", func() {
|
||||
f := framework.NewDefaultFramework("pod-network-test")
|
||||
|
||||
Describe("Granular Checks: Pods", func() {
|
||||
|
||||
// Try to hit all endpoints through a test container, retry 5 times,
|
||||
// expect exactly one unique hostname. Each of these endpoints reports
|
||||
// its own hostname.
|
||||
/*
|
||||
Testname: networking-intra-pod-http
|
||||
Description: Try to hit test endpoints from a test container and make
|
||||
sure each of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for intra-pod communication: http ", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
}
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: networking-intra-pod-udp
|
||||
Description: Try to hit test endpoints from a test container using udp
|
||||
and make sure each of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for intra-pod communication: udp ", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
}
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: networking-node-pod-http
|
||||
Description: Try to hit test endpoints from the pod and make sure each
|
||||
of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for node-pod communication: http ", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
}
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: networking-node-pod-udp
|
||||
Description: Try to hit test endpoints from the pod using udp and make sure
|
||||
each of them can report a unique hostname.
|
||||
*/
|
||||
framework.ConformanceIt("should function for node-pod communication: udp ", func() {
|
||||
config := framework.NewCoreNetworkingTestConfig(f)
|
||||
for _, endpointPod := range config.EndpointPods {
|
||||
config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
691
vendor/k8s.io/kubernetes/test/e2e/common/pods.go
generated
vendored
Normal file
@ -0,0 +1,691 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/websocket"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/kubelet"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var (
|
||||
buildBackOffDuration = time.Minute
|
||||
syncLoopFrequency = 10 * time.Second
|
||||
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
|
||||
)
|
||||
|
||||
// testHostIP tests that a pod gets a host IP
|
||||
func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
|
||||
By("creating pod")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
// Try to make sure we get a hostIP for each pod.
|
||||
hostIPTimeout := 2 * time.Minute
|
||||
t := time.Now()
|
||||
for {
|
||||
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
|
||||
if p.Status.HostIP != "" {
|
||||
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
|
||||
break
|
||||
}
|
||||
if time.Since(t) >= hostIPTimeout {
|
||||
framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
|
||||
p.Name, time.Since(t).Seconds())
|
||||
}
|
||||
framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
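// startPodAndGetBackOffs creates the pod, waits sleepAmount for it to start crash-looping,
// then samples three consecutive restart delays and returns the last two.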
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
|
||||
podClient.CreateSync(pod)
|
||||
time.Sleep(sleepAmount)
|
||||
Expect(pod.Spec.Containers).NotTo(BeEmpty())
|
||||
podName := pod.Name
|
||||
containerName := pod.Spec.Containers[0].Name
|
||||
|
||||
By("getting restart delay-0")
|
||||
_, err := getRestartDelay(podClient, podName, containerName)
|
||||
if err != nil {
|
||||
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
|
||||
}
|
||||
|
||||
By("getting restart delay-1")
|
||||
delay1, err := getRestartDelay(podClient, podName, containerName)
|
||||
if err != nil {
|
||||
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
|
||||
}
|
||||
|
||||
By("getting restart delay-2")
|
||||
delay2, err := getRestartDelay(podClient, podName, containerName)
|
||||
if err != nil {
|
||||
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
|
||||
}
|
||||
return delay1, delay2
|
||||
}
|
||||
|
||||
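// getRestartDelay polls the pod until the named container has restarted and returns the gap
// between its last termination and the following start, or an error on timeout.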
func getRestartDelay(podClient *framework.PodClient, podName string, containerName string) (time.Duration, error) {
|
||||
beginTime := time.Now()
|
||||
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
|
||||
time.Sleep(time.Second)
|
||||
pod, err := podClient.Get(podName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
|
||||
status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
|
||||
if !ok {
|
||||
framework.Logf("getRestartDelay: status missing")
|
||||
continue
|
||||
}
|
||||
|
||||
if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) {
|
||||
startedAt := status.State.Running.StartedAt.Time
|
||||
finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time
|
||||
framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
|
||||
return startedAt.Sub(finishedAt), nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("timeout getting pod restart delay")
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("Pods", func() {
|
||||
f := framework.NewDefaultFramework("pods")
|
||||
var podClient *framework.PodClient
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-created-pod-assigned-hostip
|
||||
Description: Make sure when a pod is created that it is assigned a host IP
|
||||
Address.
|
||||
*/
|
||||
framework.ConformanceIt("should get a host IP ", func() {
|
||||
name := "pod-hostip-" + string(uuid.NewUUID())
|
||||
testHostIP(podClient, &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test",
|
||||
Image: framework.GetPauseImageName(f.ClientSet),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-submitted-removed
|
||||
Description: Makes sure a pod is created, a watch can be setup for the pod,
|
||||
pod creation was observed, pod is deleted, and pod deletion is observed.
|
||||
*/
|
||||
framework.ConformanceIt("should be submitted and removed ", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-submit-remove-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("setting up watch")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(0))
|
||||
options = metav1.ListOptions{
|
||||
LabelSelector: selector.String(),
|
||||
ResourceVersion: pods.ListMeta.ResourceVersion,
|
||||
}
|
||||
w, err := podClient.Watch(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
|
||||
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient.Create(pod)
|
||||
|
||||
By("verifying the pod is in kubernetes")
|
||||
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options = metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err = podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
|
||||
By("verifying pod creation was observed")
|
||||
select {
|
||||
case event, _ := <-w.ResultChan():
|
||||
if event.Type != watch.Added {
|
||||
framework.Failf("Failed to observe pod creation: %v", event)
|
||||
}
|
||||
case <-time.After(framework.PodStartTimeout):
|
||||
framework.Failf("Timeout while waiting for pod creation")
|
||||
}
|
||||
|
||||
// We need to wait for the pod to be running, otherwise the deletion
|
||||
// may be carried out immediately rather than gracefully.
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
// save the running pod
|
||||
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
|
||||
framework.Logf("running pod: %#v", pod)
|
||||
|
||||
By("deleting the pod gracefully")
|
||||
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
|
||||
|
||||
By("verifying the kubelet observed the termination notice")
|
||||
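// Poll the kubelet's pod list directly until the pod either carries a deletion timestamp
// or is gone entirely.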
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
|
||||
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
|
||||
if err != nil {
|
||||
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
|
||||
return false, nil
|
||||
}
|
||||
for _, kubeletPod := range podList.Items {
|
||||
if pod.Name != kubeletPod.Name {
|
||||
continue
|
||||
}
|
||||
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
|
||||
framework.Logf("deletion has not yet been observed")
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
|
||||
return true, nil
|
||||
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
|
||||
|
||||
By("verifying pod deletion was observed")
|
||||
deleted := false
|
||||
var lastPod *v1.Pod
|
||||
timer := time.After(framework.DefaultPodDeletionTimeout)
|
||||
for !deleted {
|
||||
select {
|
||||
case event, _ := <-w.ResultChan():
|
||||
switch event.Type {
|
||||
case watch.Deleted:
|
||||
lastPod = event.Object.(*v1.Pod)
|
||||
deleted = true
|
||||
case watch.Error:
|
||||
framework.Logf("received a watch error: %v", event.Object)
|
||||
framework.Failf("watch closed with error")
|
||||
}
|
||||
case <-timer:
|
||||
framework.Failf("timed out waiting for pod deletion")
|
||||
}
|
||||
}
|
||||
if !deleted {
|
||||
framework.Failf("Failed to observe pod deletion")
|
||||
}
|
||||
|
||||
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
|
||||
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
|
||||
|
||||
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options = metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err = podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(0))
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-updated-successfully
|
||||
Description: Make sure it is possible to successfully update a pod's labels.
|
||||
*/
|
||||
framework.ConformanceIt("should be updated ", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-update-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("submitting the pod to kubernetes")
|
||||
pod = podClient.CreateSync(pod)
|
||||
|
||||
By("verifying the pod is in kubernetes")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
|
||||
By("updating the pod")
|
||||
podClient.Update(name, func(pod *v1.Pod) {
|
||||
value = strconv.Itoa(time.Now().Nanosecond())
|
||||
pod.Labels["time"] = value
|
||||
})
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("verifying the updated pod is in kubernetes")
|
||||
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options = metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err = podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
framework.Logf("Pod update OK")
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-update-active-deadline-seconds
|
||||
Description: Make sure it is possible to create a pod, update its
|
||||
activeDeadlineSeconds value, then wait for the deadline to pass
|
||||
and verify that the pod is terminated.
|
||||
*/
|
||||
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated ", func() {
|
||||
By("creating the pod")
|
||||
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
|
||||
value := strconv.Itoa(time.Now().Nanosecond())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
"name": "foo",
|
||||
"time": value,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "nginx",
|
||||
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
By("verifying the pod is in kubernetes")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pods, err := podClient.List(options)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
|
||||
Expect(len(pods.Items)).To(Equal(1))
|
||||
|
||||
By("updating the pod")
|
||||
podClient.Update(name, func(pod *v1.Pod) {
|
||||
newDeadline := int64(5)
|
||||
pod.Spec.ActiveDeadlineSeconds = &newDeadline
|
||||
})
|
||||
|
||||
framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: pods-contain-services-environment-variables
|
||||
Description: Make sure that when a pod is created it contains environment
|
||||
variables for each active service.
|
||||
*/
|
||||
framework.ConformanceIt("should contain environment variables for services ", func() {
|
||||
// Make a pod that will be a service.
|
||||
// This pod serves its hostname via HTTP.
|
||||
serverName := "server-envvars-" + string(uuid.NewUUID())
|
||||
serverPod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serverName,
|
||||
Labels: map[string]string{"name": serverName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "srv",
|
||||
Image: framework.ServeHostnameImage,
|
||||
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
podClient.CreateSync(serverPod)
|
||||
|
||||
// This service exposes port 8080 of the test pod as a service on port 8765
|
||||
// TODO(filbranden): We would like to use a unique service name such as:
|
||||
// svcName := "svc-envvars-" + randomSuffix()
|
||||
// However, that affects the name of the environment variables which are the capitalized
|
||||
// service name, so that breaks this test. One possibility is to tweak the variable names
|
||||
// to match the service. Another is to rethink environment variable names and possibly
|
||||
// allow overriding the prefix in the service manifest.
|
||||
svcName := "fooservice"
|
||||
svc := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: svcName,
|
||||
Labels: map[string]string{
|
||||
"name": svcName,
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: 8765,
|
||||
TargetPort: intstr.FromInt(8080),
|
||||
}},
|
||||
Selector: map[string]string{
|
||||
"name": serverName,
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to create service")
|
||||
|
||||
// Make a client pod that verifies that it has the service environment variables.
|
||||
podName := "client-envvars-" + string(uuid.NewUUID())
|
||||
const containerName = "env3cont"
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": podName},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
// It's possible for the Pod to be created before the Kubelet is updated with the new
|
||||
// service. In that case, we just retry.
|
||||
const maxRetries = 3
|
||||
expectedVars := []string{
|
||||
"FOOSERVICE_SERVICE_HOST=",
|
||||
"FOOSERVICE_SERVICE_PORT=",
|
||||
"FOOSERVICE_PORT=",
|
||||
"FOOSERVICE_PORT_8765_TCP_PORT=",
|
||||
"FOOSERVICE_PORT_8765_TCP_PROTO=",
|
||||
"FOOSERVICE_PORT_8765_TCP=",
|
||||
"FOOSERVICE_PORT_8765_TCP_ADDR=",
|
||||
}
|
||||
framework.ExpectNoErrorWithRetries(func() error {
|
||||
return f.MatchContainerOutput(pod, containerName, expectedVars, ContainSubstring)
|
||||
}, maxRetries, "Container should have service environment variables set")
|
||||
})
|
||||
|
||||
It("should support remote command execution over websockets", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-exec-websocket-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("submitting the pod to kubernetes")
|
||||
pod = podClient.CreateSync(pod)
|
||||
|
||||
req := f.ClientSet.CoreV1().RESTClient().Get().
|
||||
Namespace(f.Namespace.Name).
|
||||
Resource("pods").
|
||||
Name(pod.Name).
|
||||
Suffix("exec").
|
||||
Param("stderr", "1").
|
||||
Param("stdout", "1").
|
||||
Param("container", pod.Spec.Containers[0].Name).
|
||||
Param("command", "cat").
|
||||
Param("command", "/etc/resolv.conf")
|
||||
|
||||
url := req.URL()
|
||||
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
|
||||
}
|
||||
defer ws.Close()
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
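// Read frames from the exec websocket; a leading channel byte of 1 marks STDOUT,
// whose payload should contain the pod's resolv.conf.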
Eventually(func() error {
|
||||
for {
|
||||
var msg []byte
|
||||
if err := websocket.Message.Receive(ws, &msg); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
|
||||
}
|
||||
if len(msg) == 0 {
|
||||
continue
|
||||
}
|
||||
if msg[0] != 1 {
|
||||
framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
|
||||
}
|
||||
buf.Write(msg[1:])
|
||||
}
|
||||
if buf.Len() == 0 {
|
||||
return fmt.Errorf("Unexpected output from server")
|
||||
}
|
||||
if !strings.Contains(buf.String(), "nameserver") {
|
||||
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
|
||||
}
|
||||
return nil
|
||||
}, time.Minute, 10*time.Second).Should(BeNil())
|
||||
})
|
||||
|
||||
It("should support retrieving logs from the container over websockets", func() {
|
||||
config, err := framework.LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
|
||||
|
||||
By("creating the pod")
|
||||
name := "pod-logs-websocket-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "main",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
By("submitting the pod to kubernetes")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
req := f.ClientSet.CoreV1().RESTClient().Get().
|
||||
Namespace(f.Namespace.Name).
|
||||
Resource("pods").
|
||||
Name(pod.Name).
|
||||
Suffix("log").
|
||||
Param("container", pod.Spec.Containers[0].Name)
|
||||
|
||||
url := req.URL()
|
||||
|
||||
ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
|
||||
if err != nil {
|
||||
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
|
||||
}
|
||||
defer ws.Close()
|
||||
buf := &bytes.Buffer{}
|
||||
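// Drain the log stream from the websocket until EOF, accumulating the container's output.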
for {
|
||||
var msg []byte
|
||||
if err := websocket.Message.Receive(ws, &msg); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
|
||||
}
|
||||
if len(strings.TrimSpace(string(msg))) == 0 {
|
||||
continue
|
||||
}
|
||||
buf.Write(msg)
|
||||
}
|
||||
if buf.String() != "container is alive\n" {
|
||||
framework.Failf("Unexpected websocket logs:\n%s", buf.String())
|
||||
}
|
||||
})
|
||||
|
||||
It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
|
||||
podName := "pod-back-off-image"
|
||||
containerName := "back-off"
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"test": "back-off-image"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
|
||||
|
||||
By("updating the image")
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
|
||||
})
|
||||
|
||||
time.Sleep(syncLoopFrequency)
|
||||
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
|
||||
|
||||
By("get restart delay after image update")
|
||||
delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
|
||||
if err != nil {
|
||||
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
|
||||
}
|
||||
|
||||
if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
|
||||
framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
|
||||
}
|
||||
})
|
||||
|
||||
// Slow issue #19027 (20 mins)
|
||||
It("should cap back-off at MaxContainerBackOff [Slow]", func() {
|
||||
podName := "back-off-cap"
|
||||
containerName := "back-off-cap"
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"test": "liveness"},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: busyboxImage,
|
||||
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
podClient.CreateSync(pod)
|
||||
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
|
||||
|
||||
// wait for a delay == capped delay of MaxContainerBackOff
|
||||
By("geting restart delay when capped")
|
||||
var (
|
||||
delay1 time.Duration
|
||||
err error
|
||||
)
|
||||
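// Sample the restart delay up to three times; the last observed value is checked against the cap below.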
for i := 0; i < 3; i++ {
|
||||
delay1, err = getRestartDelay(podClient, podName, containerName)
|
||||
if err != nil {
|
||||
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
|
||||
}
|
||||
|
||||
if delay1 < kubelet.MaxContainerBackOff {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
|
||||
framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
|
||||
}
|
||||
|
||||
By("getting restart delay after a capped delay")
|
||||
delay2, err := getRestartDelay(podClient, podName, containerName)
|
||||
if err != nil {
|
||||
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
|
||||
}
|
||||
|
||||
if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
|
||||
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
|
||||
}
|
||||
})
|
||||
})
|
113
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go
generated
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
type PrivilegedPodTestConfig struct {
|
||||
f *framework.Framework
|
||||
|
||||
privilegedPod string
|
||||
privilegedContainer string
|
||||
notPrivilegedContainer string
|
||||
|
||||
pod *v1.Pod
|
||||
}
|
||||
|
||||
var _ = framework.KubeDescribe("PrivilegedPod", func() {
|
||||
config := &PrivilegedPodTestConfig{
|
||||
f: framework.NewDefaultFramework("e2e-privileged-pod"),
|
||||
privilegedPod: "privileged-pod",
|
||||
privilegedContainer: "privileged-container",
|
||||
notPrivilegedContainer: "not-privileged-container",
|
||||
}
|
||||
|
||||
It("should enable privileged commands", func() {
|
||||
By("Creating a pod with a privileged container")
|
||||
config.createPods()
|
||||
|
||||
By("Executing in the privileged container")
|
||||
config.run(config.privilegedContainer, true)
|
||||
|
||||
By("Executing in the non-privileged container")
|
||||
config.run(config.notPrivilegedContainer, false)
|
||||
})
|
||||
})
|
||||
|
||||
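// run adds (and, when expected to succeed, removes) a dummy network link inside the given
// container and asserts that the outcome matches expectSuccess.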
func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool) {
|
||||
cmd := []string{"ip", "link", "add", "dummy1", "type", "dummy"}
|
||||
reverseCmd := []string{"ip", "link", "del", "dummy1"}
|
||||
|
||||
stdout, stderr, err := c.f.ExecCommandInContainerWithFullOutput(
|
||||
c.privilegedPod, containerName, cmd...)
|
||||
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
|
||||
|
||||
if expectSuccess {
|
||||
Expect(err).NotTo(HaveOccurred(), msg)
|
||||
// We need to clean up the dummy link that was created, as it
|
||||
// leaks out into the node level -- yuck.
|
||||
_, _, err := c.f.ExecCommandInContainerWithFullOutput(
|
||||
c.privilegedPod, containerName, reverseCmd...)
|
||||
Expect(err).NotTo(HaveOccurred(),
|
||||
fmt.Sprintf("could not remove dummy1 link: %v", err))
|
||||
} else {
|
||||
Expect(err).To(HaveOccurred(), msg)
|
||||
}
|
||||
}
|
||||
|
||||
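// createPodsSpec builds a single pod containing one privileged and one unprivileged busybox container.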
func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
|
||||
isPrivileged := true
|
||||
notPrivileged := false
|
||||
|
||||
return &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: c.privilegedPod,
|
||||
Namespace: c.f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: c.privilegedContainer,
|
||||
Image: busyboxImage,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
|
||||
Command: []string{"/bin/sleep", "10000"},
|
||||
},
|
||||
{
|
||||
Name: c.notPrivilegedContainer,
|
||||
Image: busyboxImage,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
SecurityContext: &v1.SecurityContext{Privileged: ¬Privileged},
|
||||
Command: []string{"/bin/sleep", "10000"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *PrivilegedPodTestConfig) createPods() {
|
||||
podSpec := c.createPodsSpec()
|
||||
c.pod = c.f.PodClient().CreateSync(podSpec)
|
||||
}
|
1675
vendor/k8s.io/kubernetes/test/e2e/common/projected.go
generated
vendored
Normal file
File diff suppressed because it is too large
140
vendor/k8s.io/kubernetes/test/e2e/common/secrets.go
generated
vendored
Normal file
@ -0,0 +1,140 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-api-machinery] Secrets", func() {
|
||||
f := framework.NewDefaultFramework("secrets")
|
||||
|
||||
/*
|
||||
Testname: secret-env-vars
|
||||
Description: Ensure that secret can be consumed via environment
|
||||
variables.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in env vars ", func() {
|
||||
name := "secret-test-" + string(uuid.NewUUID())
|
||||
secret := secretForTest(f.Namespace.Name, name)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "secret-env-test",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
Name: "SECRET_DATA",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
SecretKeyRef: &v1.SecretKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
Key: "data-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume secrets", pod, 0, []string{
|
||||
"SECRET_DATA=value-1",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-configmaps-source
|
||||
Description: Ensure that a secret can be consumed as a set of environment
|
||||
variables via EnvFrom.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable via the environment ", func() {
|
||||
name := "secret-test-" + string(uuid.NewUUID())
|
||||
secret := newEnvFromSecret(f.Namespace.Name, name)
|
||||
By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "env-test",
|
||||
Image: busyboxImage,
|
||||
Command: []string{"sh", "-c", "env"},
|
||||
EnvFrom: []v1.EnvFromSource{
|
||||
{
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
|
||||
},
|
||||
{
|
||||
Prefix: "p_",
|
||||
SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume secrets", pod, 0, []string{
|
||||
"data_1=value-1", "data_2=value-2", "data_3=value-3",
|
||||
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
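// newEnvFromSecret returns a secret with three data keys, consumed via EnvFrom in the test above.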
func newEnvFromSecret(namespace, name string) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data_1": []byte("value-1\n"),
|
||||
"data_2": []byte("value-2\n"),
|
||||
"data_3": []byte("value-3\n"),
|
||||
},
|
||||
}
|
||||
}
|
516
vendor/k8s.io/kubernetes/test/e2e/common/secrets_volume.go
generated
vendored
Normal file
@ -0,0 +1,516 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("[sig-storage] Secrets", func() {
|
||||
f := framework.NewDefaultFramework("secrets")
|
||||
|
||||
/*
|
||||
Testname: secret-volume-mount-without-mapping
|
||||
Description: Ensure that secret can be mounted without mapping to a
|
||||
pod volume.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume ", func() {
|
||||
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-volume-mount-without-mapping-default-mode
|
||||
Description: Ensure that secret can be mounted without mapping to a
|
||||
pod volume in default mode.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set ", func() {
|
||||
defaultMode := int32(0400)
|
||||
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-volume-mount-without-mapping-non-root-default-mode-fsgroup
|
||||
Description: Ensure that secret can be mounted without mapping to a pod
|
||||
volume as non-root in default mode with fsGroup set.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set ", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
fsGroup := int64(1001)
|
||||
uid := int64(1000)
|
||||
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &uid)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-volume-mount-with-mapping
|
||||
Description: Ensure that secret can be mounted with mapping to a pod
|
||||
volume.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings ", func() {
|
||||
doSecretE2EWithMapping(f, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-volume-mount-with-mapping-item-mode
|
||||
Description: Ensure that secret can be mounted with mapping to a pod
|
||||
volume in item mode.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set ", func() {
|
||||
mode := int32(0400)
|
||||
doSecretE2EWithMapping(f, &mode)
|
||||
})
|
||||
|
||||
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
|
||||
var (
|
||||
namespace2 *v1.Namespace
|
||||
err error
|
||||
secret2Name = "secret-test-" + string(uuid.NewUUID())
|
||||
)
|
||||
|
||||
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
|
||||
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
|
||||
}
|
||||
|
||||
secret2 := secretForTest(namespace2.Name, secret2Name)
|
||||
secret2.Data = map[string][]byte{
|
||||
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
|
||||
}
|
||||
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
|
||||
}
|
||||
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-multiple-volume-mounts
|
||||
Description: Ensure that secret can be mounted to multiple pod volumes.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in a pod ", func() {
|
||||
// This test ensures that the same secret can be mounted in multiple
|
||||
// volumes in the same pod. This test case exists to prevent
|
||||
// regressions that break this use-case.
|
||||
var (
|
||||
name = "secret-test-" + string(uuid.NewUUID())
|
||||
volumeName = "secret-volume"
|
||||
volumeMountPath = "/etc/secret-volume"
|
||||
volumeName2 = "secret-volume-2"
|
||||
volumeMountPath2 = "/etc/secret-volume-2"
|
||||
secret = secretForTest(f.Namespace.Name, name)
|
||||
)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "secret-volume-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
"--file_content=/etc/secret-volume/data-1",
|
||||
"--file_mode=/etc/secret-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
MountPath: volumeMountPath2,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume secrets", pod, 0, []string{
|
||||
"content of file \"/etc/secret-volume/data-1\": value-1",
|
||||
"mode of file \"/etc/secret-volume/data-1\": -rw-r--r--",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Testname: secret-mounted-volume-optional-update-change
|
||||
Description: Ensure that optional update change to secret can be
|
||||
reflected on a mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume ", func() {
|
||||
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
volumeMountPath := "/etc/secret-volumes"
|
||||
|
||||
deleteName := "s-test-opt-del-" + string(uuid.NewUUID())
|
||||
deleteContainerName := "dels-volume-test"
|
||||
deleteVolumeName := "deletes-volume"
|
||||
deleteSecret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: deleteName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
updateName := "s-test-opt-upd-" + string(uuid.NewUUID())
|
||||
updateContainerName := "upds-volume-test"
|
||||
updateVolumeName := "updates-volume"
|
||||
updateSecret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: updateName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
createName := "s-test-opt-create-" + string(uuid.NewUUID())
|
||||
createContainerName := "creates-volume-test"
|
||||
createVolumeName := "creates-volume"
|
||||
createSecret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: createName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
|
||||
var err error
|
||||
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
|
||||
}
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
|
||||
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: deleteName,
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: updateName,
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: createName,
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: deleteContainerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "delete"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateContainerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "update"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createContainerName,
|
||||
Image: mountImage,
|
||||
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: createVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "create"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
|
||||
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
|
||||
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
|
||||
updateSecret.ResourceVersion = "" // to force update
|
||||
delete(updateSecret.Data, "data-1")
|
||||
updateSecret.Data["data-3"] = []byte("value-3")
|
||||
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
|
||||
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
|
||||
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
|
||||
}
|
||||
|
||||
By("waiting to observe update in volume")
|
||||
|
||||
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
|
||||
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
|
||||
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
|
||||
})
|
||||
})
|
||||
|
||||
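// secretForTest returns a secret fixture with keys data-1, data-2 and data-3 in the given namespace.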
func secretForTest(namespace, name string) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1\n"),
|
||||
"data-2": []byte("value-2\n"),
|
||||
"data-3": []byte("value-3\n"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
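// doSecretE2EWithoutMapping creates the named secret, mounts it into a pod at /etc/secret-volume
// without key-to-path mappings (optionally with defaultMode, fsGroup and runAsUser set), and
// verifies the mounted file's content and mode.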
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string,
|
||||
fsGroup *int64, uid *int64) {
|
||||
var (
|
||||
volumeName = "secret-volume"
|
||||
volumeMountPath = "/etc/secret-volume"
|
||||
secret = secretForTest(f.Namespace.Name, secretName)
|
||||
)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: secretName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "secret-volume-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
"--file_content=/etc/secret-volume/data-1",
|
||||
"--file_mode=/etc/secret-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
if defaultMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.Secret.DefaultMode = defaultMode
|
||||
} else {
|
||||
mode := int32(0644)
|
||||
defaultMode = &mode
|
||||
}
|
||||
|
||||
if fsGroup != nil || uid != nil {
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: fsGroup,
|
||||
RunAsUser: uid,
|
||||
}
|
||||
}
|
||||
|
||||
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
|
||||
expectedOutput := []string{
|
||||
"content of file \"/etc/secret-volume/data-1\": value-1",
|
||||
"mode of file \"/etc/secret-volume/data-1\": " + modeString,
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
|
||||
}
|
||||
|
||||
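// doSecretE2EWithMapping creates a secret, mounts it with a key-to-path mapping
// (data-1 -> new-path-data-1), optionally with an item mode, and verifies the projected
// file's content and mode.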
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
|
||||
var (
|
||||
name = "secret-test-map-" + string(uuid.NewUUID())
|
||||
volumeName = "secret-volume"
|
||||
volumeMountPath = "/etc/secret-volume"
|
||||
secret = secretForTest(f.Namespace.Name, name)
|
||||
)
|
||||
|
||||
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Secret: &v1.SecretVolumeSource{
|
||||
SecretName: name,
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "data-1",
|
||||
Path: "new-path-data-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "secret-volume-test",
|
||||
Image: mountImage,
|
||||
Args: []string{
|
||||
"--file_content=/etc/secret-volume/new-path-data-1",
|
||||
"--file_mode=/etc/secret-volume/new-path-data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
if mode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.Secret.Items[0].Mode = mode
|
||||
} else {
|
||||
defaultItemMode := int32(0644)
|
||||
mode = &defaultItemMode
|
||||
}
|
||||
|
||||
modeString := fmt.Sprintf("%v", os.FileMode(*mode))
|
||||
expectedOutput := []string{
|
||||
"content of file \"/etc/secret-volume/new-path-data-1\": value-1",
|
||||
"mode of file \"/etc/secret-volume/new-path-data-1\": " + modeString,
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
|
||||
}
|
210
vendor/k8s.io/kubernetes/test/e2e/common/sysctl.go
generated
vendored
Normal file
@ -0,0 +1,210 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/kubelet/sysctl"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("Sysctls", func() {
|
||||
f := framework.NewDefaultFramework("sysctl")
|
||||
var podClient *framework.PodClient
|
||||
|
||||
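// testPod returns a busybox pod skeleton (with an empty annotations map) that the
// sysctl tests customize before creating it.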
testPod := func() *v1.Pod {
|
||||
podName := "sysctl-" + string(uuid.NewUUID())
|
||||
pod := v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "test-container",
|
||||
Image: busyboxImage,
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return &pod
|
||||
}
|
||||
|
||||
BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
It("should support sysctls", func() {
|
||||
pod := testPod()
|
||||
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
|
||||
{
|
||||
Name: "kernel.shm_rmid_forced",
|
||||
Value: "1",
|
||||
},
|
||||
})
|
||||
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
|
||||
|
||||
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
|
||||
pod = podClient.Create(pod)
|
||||
|
||||
By("Watching for error events or started pod")
|
||||
// Watch for events instead of pod termination, because the kubelet deletes
|
||||
// failed pods without running containers. This would create a race as the pod
|
||||
// might have already been deleted here.
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
Expect(ev).To(BeNil())
|
||||
|
||||
By("Waiting for pod completion")
|
||||
err = f.WaitForPodNoLongerRunning(pod.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking that the pod succeeded")
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
|
||||
|
||||
By("Getting logs from the pod")
|
||||
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking that the sysctl is actually updated")
|
||||
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
|
||||
})
|
||||
|
||||
It("should support unsafe sysctls which are actually whitelisted", func() {
|
||||
pod := testPod()
|
||||
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
|
||||
{
|
||||
Name: "kernel.shm_rmid_forced",
|
||||
Value: "1",
|
||||
},
|
||||
})
|
||||
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
|
||||
|
||||
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
|
||||
pod = podClient.Create(pod)
|
||||
|
||||
By("Watching for error events or started pod")
|
||||
// Watch for events instead of pod termination, because the kubelet deletes
|
||||
// failed pods without running containers. This would create a race as the pod
|
||||
// might have already been deleted here.
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
Expect(ev).To(BeNil())
|
||||
|
||||
By("Waiting for pod completion")
|
||||
err = f.WaitForPodNoLongerRunning(pod.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking that the pod succeeded")
|
||||
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
|
||||
|
||||
By("Getting logs from the pod")
|
||||
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Checking that the sysctl is actually updated")
|
||||
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
|
||||
})
|
||||
|
||||
It("should reject invalid sysctls", func() {
|
||||
pod := testPod()
|
||||
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
|
||||
{
|
||||
Name: "foo-",
|
||||
Value: "bar",
|
||||
},
|
||||
{
|
||||
Name: "kernel.shmmax",
|
||||
Value: "100000000",
|
||||
},
|
||||
{
|
||||
Name: "safe-and-unsafe",
|
||||
Value: "100000000",
|
||||
},
|
||||
})
|
||||
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
|
||||
{
|
||||
Name: "kernel.shmall",
|
||||
Value: "100000000",
|
||||
},
|
||||
{
|
||||
Name: "bar..",
|
||||
Value: "42",
|
||||
},
|
||||
{
|
||||
Name: "safe-and-unsafe",
|
||||
Value: "100000000",
|
||||
},
|
||||
})
|
||||
|
||||
By("Creating a pod with one valid and two invalid sysctls")
|
||||
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
|
||||
_, err := client.Create(pod)
|
||||
|
||||
Expect(err).NotTo(BeNil())
|
||||
Expect(err.Error()).To(ContainSubstring(`Invalid value: "foo-"`))
|
||||
Expect(err.Error()).To(ContainSubstring(`Invalid value: "bar.."`))
|
||||
Expect(err.Error()).To(ContainSubstring(`safe-and-unsafe`))
|
||||
Expect(err.Error()).NotTo(ContainSubstring("kernel.shmmax"))
|
||||
})
|
||||
|
||||
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
|
||||
pod := testPod()
|
||||
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1helper.PodAnnotationsFromSysctls([]v1.Sysctl{
|
||||
{
|
||||
Name: "kernel.msgmax",
|
||||
Value: "10000000000",
|
||||
},
|
||||
})
|
||||
|
||||
By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
|
||||
pod = podClient.Create(pod)
|
||||
|
||||
By("Watching for error events or started pod")
|
||||
// Watch for events instead of pod termination, because the kubelet deletes
|
||||
// failed pods without running containers. This would create a race as the pod
|
||||
// might have already been deleted here.
|
||||
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
|
||||
framework.Skipf("No sysctl support in Docker <1.12")
|
||||
}
|
||||
|
||||
By("Checking that the pod was rejected")
|
||||
Expect(ev).ToNot(BeNil())
|
||||
Expect(ev.Reason).To(Equal("SysctlForbidden"))
|
||||
})
|
||||
})
|
136
vendor/k8s.io/kubernetes/test/e2e/common/util.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
type Suite string
|
||||
|
||||
const (
|
||||
E2E Suite = "e2e"
|
||||
NodeE2E Suite = "node e2e"
|
||||
)
|
||||
|
||||
var (
|
||||
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
|
||||
busyboxImage = "busybox"
|
||||
)
|
||||
|
||||
var CurrentSuite Suite
|
||||
|
||||
// CommonImageWhiteList is the list of images used in common test. These images should be prepulled
|
||||
// before a test starts, so that the tests won't fail due to image pulling flakes. Currently, this is
|
||||
// only used by node e2e test.
|
||||
// TODO(random-liu): Change the image puller pod to use similar mechanism.
|
||||
var CommonImageWhiteList = sets.NewString(
|
||||
"busybox",
|
||||
imageutils.GetE2EImage(imageutils.EntrypointTester),
|
||||
imageutils.GetE2EImage(imageutils.Liveness),
|
||||
imageutils.GetE2EImage(imageutils.Mounttest),
|
||||
imageutils.GetE2EImage(imageutils.MounttestUser),
|
||||
imageutils.GetE2EImage(imageutils.Netexec),
|
||||
imageutils.GetE2EImage(imageutils.NginxSlim),
|
||||
imageutils.GetE2EImage(imageutils.ServeHostname),
|
||||
imageutils.GetE2EImage(imageutils.TestWebserver),
|
||||
imageutils.GetE2EImage(imageutils.Hostexec),
|
||||
"gcr.io/google_containers/volume-nfs:0.8",
|
||||
"gcr.io/google_containers/volume-gluster:0.2",
|
||||
"gcr.io/google_containers/e2e-net-amd64:1.0",
|
||||
)
|
||||
|
||||
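// svcByName returns a NodePort service that selects pods labeled "name": name and exposes the given port.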
func svcByName(name string, port int) *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeNodePort,
|
||||
Selector: map[string]string{
|
||||
"name": name,
|
||||
},
|
||||
Ports: []v1.ServicePort{{
|
||||
Port: int32(port),
|
||||
TargetPort: intstr.FromInt(port),
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
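// NewSVCByName creates the NodePort service returned by svcByName on test port 9376 in the given namespace.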
func NewSVCByName(c clientset.Interface, ns, name string) error {
|
||||
const testPort = 9376
|
||||
_, err := c.CoreV1().Services(ns).Create(svcByName(name, testPort))
|
||||
return err
|
||||
}
|
||||
|
||||
// NewRCByName creates a replication controller with a selector matching the given name.
|
||||
func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
|
||||
By(fmt.Sprintf("creating replication controller %s", name))
|
||||
return c.CoreV1().ReplicationControllers(ns).Create(framework.RcByNamePort(
|
||||
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
|
||||
}
|
||||
|
||||
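// RestartNodes resets the given GCE instances with "gcloud compute instances reset" and
// waits until each node reports a new boot ID.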
func RestartNodes(c clientset.Interface, nodeNames []string) error {
|
||||
// List old boot IDs.
|
||||
oldBootIDs := make(map[string]string)
|
||||
for _, name := range nodeNames {
|
||||
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting node info before reboot: %s", err)
|
||||
}
|
||||
oldBootIDs[name] = node.Status.NodeInfo.BootID
|
||||
}
|
||||
// Reboot the nodes.
|
||||
args := []string{
|
||||
"compute",
|
||||
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
|
||||
"instances",
|
||||
"reset",
|
||||
}
|
||||
args = append(args, nodeNames...)
|
||||
args = append(args, fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone))
|
||||
stdout, stderr, err := framework.RunCmd("gcloud", args...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
|
||||
}
|
||||
// Wait for their boot IDs to change.
|
||||
for _, name := range nodeNames {
|
||||
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
|
||||
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error getting node info after reboot: %s", err)
|
||||
}
|
||||
return node.Status.NodeInfo.BootID != oldBootIDs[name], nil
|
||||
}); err != nil {
|
||||
return fmt.Errorf("error waiting for node %s boot ID to change: %s", name, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
153
vendor/k8s.io/kubernetes/test/e2e/common/volumes.go
generated
vendored
Normal file
@ -0,0 +1,153 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This test checks that various VolumeSources are working.
|
||||
*
|
||||
* There are two ways to test the volumes:
|
||||
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
|
||||
* The test creates a server pod, exporting simple 'index.html' file.
|
||||
* Then it uses appropriate VolumeSource to import this file into a client pod
|
||||
* and checks that the pod can see the file. It does so by importing the file
|
||||
* into the web server root and loading the index.html from it.
|
||||
*
|
||||
* These tests work only when privileged containers are allowed; exporting
|
||||
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
|
||||
* other privileged magic in the server pod.
|
||||
*
|
||||
* Note that the server containers are for testing purposes only and should not
|
||||
* be used in production.
|
||||
*
|
||||
* 2) With server outside of Kubernetes (Cinder, ...)
|
||||
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
|
||||
* the tested Kubernetes cluster. The test itself creates a new volume,
|
||||
* and checks that Kubernetes can use it as a volume.
|
||||
*/
|
||||
|
||||
// GlusterFS test is duplicated from test/e2e/volumes.go. Any changes made there
|
||||
// should be duplicated here
|
||||
|
||||
package common
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// These tests need privileged containers, which are disabled by default. Run
|
||||
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
|
||||
var _ = Describe("[sig-storage] GCP Volumes", func() {
|
||||
f := framework.NewDefaultFramework("gcp-volume")
|
||||
|
||||
// note that namespace deletion is handled by delete-namespace flag
|
||||
// filled in BeforeEach
|
||||
var namespace *v1.Namespace
|
||||
var c clientset.Interface
|
||||
|
||||
BeforeEach(func() {
|
||||
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
|
||||
|
||||
namespace = f.Namespace
|
||||
c = f.ClientSet
|
||||
})
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// NFS
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
Describe("NFSv4", func() {
|
||||
It("should be mountable for NFSv4", func() {
|
||||
config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{})
|
||||
defer framework.VolumeTestCleanup(f, config)
|
||||
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
Volume: v1.VolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{
|
||||
Server: serverIP,
|
||||
Path: "/",
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
File: "index.html",
|
||||
ExpectedContent: "Hello from NFS!",
|
||||
},
|
||||
}
|
||||
|
||||
// Must match content of test/images/volumes-tester/nfs/index.html
|
||||
framework.TestVolumeClient(c, config, nil, tests)
|
||||
})
|
||||
})
|
||||
|
||||
Describe("NFSv3", func() {
|
||||
It("should be mountable for NFSv3", func() {
|
||||
config, _, serverIP := framework.NewNFSServer(c, namespace.Name, []string{})
|
||||
defer framework.VolumeTestCleanup(f, config)
|
||||
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
Volume: v1.VolumeSource{
|
||||
NFS: &v1.NFSVolumeSource{
|
||||
Server: serverIP,
|
||||
Path: "/exports",
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
File: "index.html",
|
||||
ExpectedContent: "Hello from NFS!",
|
||||
},
|
||||
}
|
||||
// Must match content of test/images/volumes-tester/nfs/index.html
|
||||
framework.TestVolumeClient(c, config, nil, tests)
|
||||
})
|
||||
})
|
||||
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
// Gluster
|
||||
////////////////////////////////////////////////////////////////////////
|
||||
Describe("GlusterFS", func() {
|
||||
It("should be mountable", func() {
|
||||
// create gluster server and endpoints
|
||||
config, _, _ := framework.NewGlusterfsServer(c, namespace.Name)
|
||||
name := config.Prefix + "-server"
|
||||
defer func() {
|
||||
framework.VolumeTestCleanup(f, config)
|
||||
err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed")
|
||||
}()
|
||||
|
||||
tests := []framework.VolumeTest{
|
||||
{
|
||||
Volume: v1.VolumeSource{
|
||||
Glusterfs: &v1.GlusterfsVolumeSource{
|
||||
EndpointsName: name,
|
||||
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
|
||||
Path: "test_vol",
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
File: "index.html",
|
||||
// Must match content of test/images/volumes-tester/gluster/index.html
|
||||
ExpectedContent: "Hello from GlusterFS!",
|
||||
},
|
||||
}
|
||||
framework.TestVolumeClient(c, config, nil, tests)
|
||||
})
|
||||
})
|
||||
})
|
3
vendor/k8s.io/kubernetes/test/e2e/e2e-example-config.json
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
{
|
||||
"provider":"local"
|
||||
}
|
367
vendor/k8s.io/kubernetes/test/e2e/e2e.go
generated
vendored
Normal file
@ -0,0 +1,367 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo/config"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
"github.com/onsi/gomega"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/logs"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
commontest "k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
"k8s.io/kubernetes/test/e2e/manifest"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
cloudConfig = &framework.TestContext.CloudConfig
|
||||
)
|
||||
|
||||
// setupProviderConfig validates and sets up cloudConfig based on framework.TestContext.Provider.
|
||||
func setupProviderConfig() error {
|
||||
switch framework.TestContext.Provider {
|
||||
case "":
|
||||
glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
|
||||
|
||||
case "gce", "gke":
|
||||
framework.Logf("Fetching cloud provider for %q\r\n", framework.TestContext.Provider)
|
||||
zone := framework.TestContext.CloudConfig.Zone
|
||||
region := framework.TestContext.CloudConfig.Region
|
||||
|
||||
var err error
|
||||
if region == "" {
|
||||
region, err = gcecloud.GetGCERegion(zone)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
|
||||
}
|
||||
}
|
||||
managedZones := []string{} // Manage all zones in the region
|
||||
if !framework.TestContext.CloudConfig.MultiZone {
|
||||
managedZones = []string{zone}
|
||||
}
|
||||
|
||||
gceAlphaFeatureGate, err := gcecloud.NewAlphaFeatureGate([]string{gcecloud.AlphaFeatureNetworkEndpointGroup})
|
||||
if err != nil {
|
||||
glog.Errorf("Encountered error for creating alpha feature gate: %v", err)
|
||||
}
|
||||
|
||||
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
|
||||
ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint,
|
||||
ProjectID: framework.TestContext.CloudConfig.ProjectID,
|
||||
Region: region,
|
||||
Zone: zone,
|
||||
ManagedZones: managedZones,
|
||||
NetworkName: "", // TODO: Change this to use framework.TestContext.CloudConfig.Network?
|
||||
SubnetworkName: "",
|
||||
NodeTags: nil,
|
||||
NodeInstancePrefix: "",
|
||||
TokenSource: nil,
|
||||
UseMetadataServer: false,
|
||||
AlphaFeatureGate: gceAlphaFeatureGate})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error building GCE/GKE provider: %v", err)
|
||||
}
|
||||
|
||||
cloudConfig.Provider = gceCloud
|
||||
|
||||
// Arbitrarily pick one of the zones we have nodes in
|
||||
if cloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone {
|
||||
zones, err := gceCloud.GetAllZonesFromCloudProvider()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cloudConfig.Zone, _ = zones.PopAny()
|
||||
}
|
||||
|
||||
case "aws":
|
||||
if cloudConfig.Zone == "" {
|
||||
return fmt.Errorf("gce-zone must be specified for AWS")
|
||||
}
|
||||
case "azure":
|
||||
if cloudConfig.ConfigFile == "" {
|
||||
return fmt.Errorf("config-file must be specified for Azure")
|
||||
}
|
||||
config, err := os.Open(cloudConfig.ConfigFile)
|
||||
if err != nil {
|
||||
framework.Logf("Couldn't open cloud provider configuration %s: %#v",
|
||||
cloudConfig.ConfigFile, err)
|
||||
}
|
||||
defer config.Close()
|
||||
cloudConfig.Provider, err = azure.NewCloud(config)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// There are certain operations we only want to run once per overall test invocation
|
||||
// (such as deleting old namespaces, or verifying that all system pods are running).
|
||||
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
|
||||
// to ensure that these operations only run on the first parallel Ginkgo node.
|
||||
//
|
||||
// This function takes two parameters: one function which runs on only the first Ginkgo node,
|
||||
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
|
||||
// accepting the byte array.
|
||||
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
|
||||
// Run only on Ginkgo node 1
|
||||
|
||||
if err := setupProviderConfig(); err != nil {
|
||||
framework.Failf("Failed to setup provider config: %v", err)
|
||||
}
|
||||
|
||||
switch framework.TestContext.Provider {
|
||||
case "gce", "gke":
|
||||
framework.LogClusterImageSources()
|
||||
}
|
||||
|
||||
c, err := framework.LoadClientset()
|
||||
if err != nil {
|
||||
glog.Fatal("Error loading client: ", err)
|
||||
}
|
||||
|
||||
// Delete any namespaces except those created by the system. This ensures no
|
||||
// lingering resources are left over from a previous test run.
|
||||
if framework.TestContext.CleanStart {
|
||||
deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */
|
||||
[]string{
|
||||
metav1.NamespaceSystem,
|
||||
metav1.NamespaceDefault,
|
||||
metav1.NamespacePublic,
|
||||
})
|
||||
if err != nil {
|
||||
framework.Failf("Error deleting orphaned namespaces: %v", err)
|
||||
}
|
||||
glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
|
||||
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
|
||||
framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
|
||||
}
|
||||
}
|
||||
|
||||
// In large clusters we may get to this point but still have a bunch
|
||||
// of nodes without Routes created. Since this would make a node
|
||||
// unschedulable, we need to wait until all of them are schedulable.
|
||||
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
|
||||
|
||||
// Ensure all pods are running and ready before starting tests (otherwise,
|
||||
// cluster infrastructure pods that are being pulled or started can block
|
||||
// test pods from running, and tests that ensure all pods are running and
|
||||
// ready will fail).
|
||||
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
|
||||
// TODO: In large clusters, we often observe non-starting pods due to
|
||||
// #41007. To avoid those pods blocking the whole test run (and just
|
||||
// wasting the whole run), we allow for some not-ready pods (with the
|
||||
// number equal to the number of allowed not-ready nodes).
|
||||
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, framework.ImagePullerLabels); err != nil {
|
||||
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
|
||||
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
|
||||
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
|
||||
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
|
||||
}
|
||||
|
||||
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout); err != nil {
|
||||
// There is no guarantee that the image pulling will succeed in 3 minutes
|
||||
// and we don't even run the image puller on all platforms (including GKE).
|
||||
// We wait for it so we get an indication of failures in the logs, and to
|
||||
// maximize benefit of image pre-pulling.
|
||||
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", framework.ImagePrePullingTimeout, err)
|
||||
}
|
||||
|
||||
// Dump the output of the nethealth containers only once per run
|
||||
if framework.TestContext.DumpLogsOnFailure {
|
||||
logFunc := framework.Logf
|
||||
if framework.TestContext.ReportDir != "" {
|
||||
filePath := path.Join(framework.TestContext.ReportDir, "nethealth.txt")
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to create a file with network health data %v: %v\nPrinting to stdout", filePath, err)
|
||||
} else {
|
||||
defer file.Close()
|
||||
if err = file.Chmod(0644); err != nil {
|
||||
framework.Logf("Failed to chmod to 644 of %v: %v", filePath, err)
|
||||
}
|
||||
logFunc = framework.GetLogToFileFunc(file)
|
||||
framework.Logf("Dumping network health container logs from all nodes to file %v", filePath)
|
||||
}
|
||||
} else {
|
||||
framework.Logf("Dumping network health container logs from all nodes...")
|
||||
}
|
||||
framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", logFunc)
|
||||
}
|
||||
|
||||
// Log the version of the server and this client.
|
||||
framework.Logf("e2e test version: %s", version.Get().GitVersion)
|
||||
|
||||
dc := c.DiscoveryClient
|
||||
|
||||
serverVersion, serverErr := dc.ServerVersion()
|
||||
if serverErr != nil {
|
||||
framework.Logf("Unexpected server error retrieving version: %v", serverErr)
|
||||
}
|
||||
if serverVersion != nil {
|
||||
framework.Logf("kube-apiserver version: %s", serverVersion.GitVersion)
|
||||
}
|
||||
|
||||
// Reference common test to make the import valid.
|
||||
commontest.CurrentSuite = commontest.E2E
|
||||
|
||||
return nil
|
||||
|
||||
}, func(data []byte) {
|
||||
// Run on all Ginkgo nodes
|
||||
|
||||
if cloudConfig.Provider == nil {
|
||||
if err := setupProviderConfig(); err != nil {
|
||||
framework.Failf("Failed to setup provider config: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
|
||||
// Here, the order of functions is reversed; first, the function which runs everywhere,
|
||||
// and then the function that only runs on the first Ginkgo node.
|
||||
var _ = ginkgo.SynchronizedAfterSuite(func() {
|
||||
// Run on all Ginkgo nodes
|
||||
framework.Logf("Running AfterSuite actions on all node")
|
||||
framework.RunCleanupActions()
|
||||
}, func() {
|
||||
// Run only on Ginkgo node 1
|
||||
framework.Logf("Running AfterSuite actions on node 1")
|
||||
if framework.TestContext.ReportDir != "" {
|
||||
framework.CoreDump(framework.TestContext.ReportDir)
|
||||
}
|
||||
if framework.TestContext.GatherSuiteMetricsAfterTest {
|
||||
if err := gatherTestSuiteMetrics(); err != nil {
|
||||
framework.Logf("Error gathering metrics: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
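// gatherTestSuiteMetrics grabs metrics from the control-plane components (and kubelets, outside
// kubemark) and either writes them as JSON into the report directory or logs them.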
func gatherTestSuiteMetrics() error {
|
||||
framework.Logf("Gathering metrics")
|
||||
c, err := framework.LoadClientset()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading client: %v", err)
|
||||
}
|
||||
|
||||
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
|
||||
grabber, err := metrics.NewMetricsGrabber(c, nil, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create MetricsGrabber: %v", err)
|
||||
}
|
||||
|
||||
received, err := grabber.Grab()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to grab metrics: %v", err)
|
||||
}
|
||||
|
||||
metricsForE2E := (*framework.MetricsForE2E)(&received)
|
||||
metricsJson := metricsForE2E.PrintJSON()
|
||||
if framework.TestContext.ReportDir != "" {
|
||||
filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
|
||||
if err := ioutil.WriteFile(filePath, []byte(metricsJson), 0644); err != nil {
|
||||
return fmt.Errorf("error writing to %q: %v", filePath, err)
|
||||
}
|
||||
} else {
|
||||
framework.Logf("\n\nTest Suite Metrics:\n%s\n\n", metricsJson)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestE2E checks configuration parameters (specified through flags) and then runs
|
||||
// E2E tests using the Ginkgo runner.
|
||||
// If a "report directory" is specified, one or more JUnit test reports will be
|
||||
// generated in this directory, and cluster logs will also be saved.
|
||||
// This function is called on each Ginkgo node in parallel mode.
|
||||
func RunE2ETests(t *testing.T) {
|
||||
runtimeutils.ReallyCrash = true
|
||||
logs.InitLogs()
|
||||
defer logs.FlushLogs()
|
||||
|
||||
gomega.RegisterFailHandler(ginkgowrapper.Fail)
|
||||
// Disable skipped tests unless they are explicitly requested.
|
||||
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
|
||||
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
|
||||
}
|
||||
|
||||
// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
|
||||
var r []ginkgo.Reporter
|
||||
if framework.TestContext.ReportDir != "" {
|
||||
// TODO: we should probably only be trying to create this directory once
|
||||
// rather than once-per-Ginkgo-node.
|
||||
if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
|
||||
glog.Errorf("Failed creating report directory: %v", err)
|
||||
} else {
|
||||
r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
|
||||
}
|
||||
}
|
||||
glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
|
||||
|
||||
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
|
||||
}
|
||||
|
||||
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
|
||||
// to flip to Ready, log its output and delete it.
|
||||
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
|
||||
path := "test/images/clusterapi-tester/pod.yaml"
|
||||
framework.Logf("Parsing pod from %v", path)
|
||||
p, err := manifest.PodFromManifest(path)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
|
||||
return
|
||||
}
|
||||
p.Namespace = ns
|
||||
if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
|
||||
framework.Logf("Failed to create %v: %v", p.Name, err)
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
|
||||
framework.Logf("Failed to delete pod %v: %v", p.Name, err)
|
||||
}
|
||||
}()
|
||||
timeout := 5 * time.Minute
|
||||
if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
|
||||
framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
|
||||
return
|
||||
}
|
||||
logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
|
||||
} else {
|
||||
framework.Logf("Output of clusterapi-tester:\n%v", logs)
|
||||
}
|
||||
}
|
50
vendor/k8s.io/kubernetes/test/e2e/e2e_test.go
generated
vendored
Normal file
@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
// test sources
|
||||
_ "k8s.io/kubernetes/test/e2e/apimachinery"
|
||||
_ "k8s.io/kubernetes/test/e2e/apps"
|
||||
_ "k8s.io/kubernetes/test/e2e/auth"
|
||||
_ "k8s.io/kubernetes/test/e2e/autoscaling"
|
||||
_ "k8s.io/kubernetes/test/e2e/common"
|
||||
_ "k8s.io/kubernetes/test/e2e/instrumentation"
|
||||
_ "k8s.io/kubernetes/test/e2e/kubectl"
|
||||
_ "k8s.io/kubernetes/test/e2e/lifecycle"
|
||||
_ "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap"
|
||||
_ "k8s.io/kubernetes/test/e2e/multicluster"
|
||||
_ "k8s.io/kubernetes/test/e2e/network"
|
||||
_ "k8s.io/kubernetes/test/e2e/node"
|
||||
_ "k8s.io/kubernetes/test/e2e/scalability"
|
||||
_ "k8s.io/kubernetes/test/e2e/scheduling"
|
||||
_ "k8s.io/kubernetes/test/e2e/servicecatalog"
|
||||
_ "k8s.io/kubernetes/test/e2e/storage"
|
||||
_ "k8s.io/kubernetes/test/e2e/ui"
|
||||
)
|
||||
|
||||
func init() {
|
||||
framework.ViperizeFlags()
|
||||
}
|
||||
|
||||
func TestE2E(t *testing.T) {
|
||||
RunE2ETests(t)
|
||||
}
|
616
vendor/k8s.io/kubernetes/test/e2e/examples.go
generated
vendored
Normal file
@ -0,0 +1,616 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/authentication/serviceaccount"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/generated"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
serverStartTimeout = framework.PodStartTimeout + 3*time.Minute
|
||||
)
|
||||
|
||||
var _ = framework.KubeDescribe("[Feature:Example]", func() {
|
||||
f := framework.NewDefaultFramework("examples")
|
||||
|
||||
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
|
||||
clusterState := func(selectorKey string, selectorValue string) *framework.ClusterVerification {
|
||||
return f.NewClusterVerification(
|
||||
f.Namespace,
|
||||
framework.PodStateVerification{
|
||||
Selectors: map[string]string{selectorKey: selectorValue},
|
||||
ValidPhases: []v1.PodPhase{v1.PodRunning},
|
||||
})
|
||||
}
|
||||
// Customized ForEach wrapper for this test.
|
||||
forEachPod := func(selectorKey string, selectorValue string, fn func(v1.Pod)) {
|
||||
clusterState(selectorKey, selectorValue).ForEach(fn)
|
||||
}
|
||||
var c clientset.Interface
|
||||
var ns string
|
||||
BeforeEach(func() {
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
|
||||
// this test wants powerful permissions. Since the namespace names are unique, we can leave this
|
||||
// lying around so we don't have to race any caches
|
||||
framework.BindClusterRoleInNamespace(c.RbacV1beta1(), "edit", f.Namespace.Name,
|
||||
rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
|
||||
|
||||
err := framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
|
||||
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
|
||||
f.Namespace.Name, "create", schema.GroupResource{Resource: "pods"}, true)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Redis", func() {
|
||||
It("should create and stop redis servers", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/redis", file)
|
||||
}
|
||||
bootstrapYaml := mkpath("redis-master.yaml")
|
||||
sentinelServiceYaml := mkpath("redis-sentinel-service.yaml")
|
||||
sentinelControllerYaml := mkpath("redis-sentinel-controller.yaml")
|
||||
controllerYaml := mkpath("redis-controller.yaml")
|
||||
|
||||
bootstrapPodName := "redis-master"
|
||||
redisRC := "redis"
|
||||
sentinelRC := "redis-sentinel"
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
expectedOnServer := "The server is now ready to accept connections"
|
||||
expectedOnSentinel := "+monitor master"
|
||||
|
||||
By("starting redis bootstrap")
|
||||
framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag)
|
||||
err := framework.WaitForPodNameRunningInNamespace(c, bootstrapPodName, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("setting up services and controllers")
|
||||
framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{sentinelRC: "true"}))
|
||||
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
label = labels.SelectorFromSet(labels.Set(map[string]string{"name": redisRC}))
|
||||
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("scaling up the deployment")
|
||||
framework.RunKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag)
|
||||
framework.RunKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag)
|
||||
framework.WaitForRCToStabilize(c, ns, redisRC, framework.PodReadyBeforeTimeout)
|
||||
framework.WaitForRCToStabilize(c, ns, sentinelRC, framework.PodReadyBeforeTimeout)
|
||||
|
||||
By("checking up the services")
|
||||
checkAllLogs := func() {
|
||||
selectorKey, selectorValue := "name", redisRC
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
|
||||
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
|
||||
if pod.Name != bootstrapPodName {
|
||||
_, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
selectorKey, selectorValue = sentinelRC, "true"
|
||||
label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
|
||||
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
|
||||
if pod.Name != bootstrapPodName {
|
||||
_, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
}
|
||||
checkAllLogs()
|
||||
|
||||
By("turning down bootstrap")
|
||||
framework.RunKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag)
|
||||
err = framework.WaitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
By("waiting for the new master election")
|
||||
checkAllLogs()
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Spark", func() {
|
||||
It("should start spark master, driver and workers", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/spark", file)
|
||||
}
|
||||
|
||||
// TODO: Add Zeppelin and the Web UI to this example.
|
||||
serviceYaml := mkpath("spark-master-service.yaml")
|
||||
masterYaml := mkpath("spark-master-controller.yaml")
|
||||
workerControllerYaml := mkpath("spark-worker-controller.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
|
||||
master := func() {
|
||||
By("starting master")
|
||||
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
|
||||
selectorKey, selectorValue := "component", "spark-master"
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
|
||||
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
framework.Logf("Now polling for Master startup...")
|
||||
// There is only one master pod, but iterating over the selector is a natural way to look up its pod name.
|
||||
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
|
||||
framework.Logf("Now waiting for master to startup in %v", pod.Name)
|
||||
_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
By("waiting for master endpoint")
|
||||
err = framework.WaitForEndpoint(c, ns, "spark-master")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
|
||||
_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
|
||||
if maErr != nil {
|
||||
framework.Failf("Didn't find target string. error: %v", maErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
worker := func() {
|
||||
By("starting workers")
|
||||
framework.Logf("Now starting Workers")
|
||||
framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
|
||||
selectorKey, selectorValue := "component", "spark-worker"
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
|
||||
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// For now, scaling is orthogonal to the core test.
|
||||
// framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
|
||||
|
||||
framework.Logf("Now polling for worker startup...")
|
||||
forEachPod(selectorKey, selectorValue,
|
||||
func(pod v1.Pod) {
|
||||
_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
|
||||
Expect(slaveErr).NotTo(HaveOccurred())
|
||||
})
|
||||
}
|
||||
// Run the worker verification after we turn up the master.
|
||||
defer worker()
|
||||
master()
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Cassandra", func() {
|
||||
It("should create and scale cassandra", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
|
||||
}
|
||||
serviceYaml := mkpath("cassandra-service.yaml")
|
||||
controllerYaml := mkpath("cassandra-controller.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
|
||||
By("Starting the cassandra service")
|
||||
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
|
||||
framework.Logf("wait for service")
|
||||
err := framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Create an RC with n nodes in it. Each node will then be verified.
|
||||
By("Creating a Cassandra RC")
|
||||
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
|
||||
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
forEachPod("app", "cassandra", func(pod v1.Pod) {
|
||||
framework.Logf("Verifying pod %v ", pod.Name)
|
||||
// TODO how do we do this better? Ready Probe?
|
||||
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
By("Finding each node in the nodetool status lines")
|
||||
forEachPod("app", "cassandra", func(pod v1.Pod) {
|
||||
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
|
||||
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
|
||||
if matched != true {
|
||||
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("CassandraStatefulSet", func() {
|
||||
It("should create statefulset", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
|
||||
}
|
||||
serviceYaml := mkpath("cassandra-service.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
|
||||
// We have to change the DNS prefix because the namespace is generated dynamically.
|
||||
input := generated.ReadOrDie(mkpath("cassandra-statefulset.yaml"))
|
||||
|
||||
output := strings.Replace(string(input), "cassandra-0.cassandra.default.svc.cluster.local", "cassandra-0.cassandra."+ns+".svc.cluster.local", -1)
|
||||
|
||||
statefulsetYaml := "/tmp/cassandra-statefulset.yaml"
|
||||
|
||||
err := ioutil.WriteFile(statefulsetYaml, []byte(output), 0644)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Starting the cassandra service")
|
||||
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
|
||||
framework.Logf("wait for service")
|
||||
err = framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Create a StatefulSet with n nodes in it. Each node will then be verified.
|
||||
By("Creating a Cassandra StatefulSet")
|
||||
|
||||
framework.RunKubectlOrDie("create", "-f", statefulsetYaml, nsFlag)
|
||||
|
||||
statefulsetPoll := 30 * time.Second
|
||||
statefulsetTimeout := 10 * time.Minute
|
||||
// TODO - parse this number out of the yaml
|
||||
numPets := 3
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
|
||||
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
|
||||
func() (bool, error) {
|
||||
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: label.String()})
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
if len(podList.Items) < numPets {
|
||||
framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
|
||||
return false, nil
|
||||
}
|
||||
if len(podList.Items) > numPets {
|
||||
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
isReady := podutil.IsPodReady(&p)
|
||||
if p.Status.Phase != v1.PodRunning || !isReady {
|
||||
framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Finding each node in the nodetool status lines")
|
||||
forEachPod("app", "cassandra", func(pod v1.Pod) {
|
||||
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
|
||||
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
|
||||
if matched != true {
|
||||
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
|
||||
}
|
||||
})
|
||||
// Reuse the StatefulSet e2e helper here, since deleting the PVCs by hand is a pain.
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Storm", func() {
|
||||
It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/storm", file)
|
||||
}
|
||||
zookeeperServiceJson := mkpath("zookeeper-service.json")
|
||||
zookeeperPodJson := mkpath("zookeeper.json")
|
||||
nimbusServiceJson := mkpath("storm-nimbus-service.json")
|
||||
nimbusPodJson := mkpath("storm-nimbus.json")
|
||||
workerControllerJson := mkpath("storm-worker-controller.json")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
zookeeperPod := "zookeeper"
|
||||
nimbusPod := "nimbus"
|
||||
|
||||
By("starting Zookeeper")
|
||||
framework.RunKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag)
|
||||
err := f.WaitForPodRunningSlow(zookeeperPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("checking if zookeeper is up and running")
|
||||
_, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
err = framework.WaitForEndpoint(c, ns, "zookeeper")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("starting Nimbus")
|
||||
framework.RunKubectlOrDie("create", "-f", nimbusPodJson, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag)
|
||||
err = f.WaitForPodRunningSlow(nimbusPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = framework.WaitForEndpoint(c, ns, "nimbus")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("starting workers")
|
||||
framework.RunKubectlOrDie("create", "-f", workerControllerJson, nsFlag)
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "storm-worker"}))
|
||||
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
forEachPod("name", "storm-worker", func(pod v1.Pod) {
|
||||
// Do nothing; just wait for the pod to be running.
|
||||
})
|
||||
// TODO: Add logging configuration to nimbus & workers images and then
|
||||
// look for a string instead of sleeping.
|
||||
time.Sleep(20 * time.Second)
|
||||
|
||||
By("checking if there are established connections to Zookeeper")
|
||||
_, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("checking if Nimbus responds to requests")
|
||||
framework.LookForString("No topologies running.", time.Minute, func() string {
|
||||
return framework.RunKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list")
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Liveness", func() {
|
||||
It("liveness pods should be automatically restarted", func() {
|
||||
mkpath := func(file string) string {
|
||||
path := filepath.Join("test/fixtures/doc-yaml/user-guide/liveness", file)
|
||||
framework.ExpectNoError(createFileForGoBinData(path, path))
|
||||
return path
|
||||
}
|
||||
execYaml := mkpath("exec-liveness.yaml")
|
||||
httpYaml := mkpath("http-liveness.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
|
||||
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, execYaml), nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, httpYaml), nsFlag)
|
||||
|
||||
// Since both containers start rapidly, we can easily run this test in parallel.
|
||||
var wg sync.WaitGroup
|
||||
passed := true
|
||||
checkRestart := func(podName string, timeout time.Duration) {
|
||||
err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
|
||||
stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
|
||||
framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
|
||||
if stat.RestartCount > 0 {
|
||||
framework.Logf("Saw %v restart, succeeded...", podName)
|
||||
wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
framework.Logf("Failed waiting for %v restart! ", podName)
|
||||
passed = false
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
By("Check restarts")
|
||||
|
||||
// Start the "actual test", and wait for both pods to complete.
|
||||
// If 2 fail: Something is broken with the test (or maybe even with liveness).
|
||||
// If 1 fails: It's probably just an error in the examples/ files themselves.
|
||||
wg.Add(2)
|
||||
for _, c := range []string{"liveness-http", "liveness-exec"} {
|
||||
go checkRestart(c, 2*time.Minute)
|
||||
}
|
||||
wg.Wait()
|
||||
if !passed {
|
||||
framework.Failf("At least one liveness example failed. See the logs above.")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Secret", func() {
|
||||
It("should create a pod that reads a secret", func() {
|
||||
mkpath := func(file string) string {
|
||||
path := filepath.Join("test/fixtures/doc-yaml/user-guide/secrets", file)
|
||||
framework.ExpectNoError(createFileForGoBinData(path, path))
|
||||
return path
|
||||
}
|
||||
secretYaml := mkpath("secret.yaml")
|
||||
podYaml := mkpath("secret-pod.yaml")
|
||||
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
podName := "secret-test-pod"
|
||||
|
||||
By("creating secret and pod")
|
||||
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, secretYaml), nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, podYaml), nsFlag)
|
||||
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("checking if secret was read correctly")
|
||||
_, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Downward API", func() {
|
||||
It("should create a pod that prints his name and namespace", func() {
|
||||
mkpath := func(file string) string {
|
||||
path := filepath.Join("test/fixtures/doc-yaml/user-guide/downward-api", file)
|
||||
framework.ExpectNoError(createFileForGoBinData(path, path))
|
||||
return path
|
||||
}
|
||||
podYaml := mkpath("dapi-pod.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
podName := "dapi-test-pod"
|
||||
|
||||
By("creating the pod")
|
||||
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, podYaml), nsFlag)
|
||||
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("checking if name and namespace were passed correctly")
|
||||
_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("RethinkDB", func() {
|
||||
It("should create and stop rethinkdb servers", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/rethinkdb", file)
|
||||
}
|
||||
driverServiceYaml := mkpath("driver-service.yaml")
|
||||
rethinkDbControllerYaml := mkpath("rc.yaml")
|
||||
adminPodYaml := mkpath("admin-pod.yaml")
|
||||
adminServiceYaml := mkpath("admin-service.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
|
||||
By("starting rethinkdb")
|
||||
framework.RunKubectlOrDie("create", "-f", driverServiceYaml, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"db": "rethinkdb"}))
|
||||
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
checkDbInstances := func() {
|
||||
forEachPod("db", "rethinkdb", func(pod v1.Pod) {
|
||||
_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
}
|
||||
checkDbInstances()
|
||||
err = framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("scaling rethinkdb")
|
||||
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true)
|
||||
checkDbInstances()
|
||||
|
||||
By("starting admin")
|
||||
framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
|
||||
err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
checkDbInstances()
|
||||
content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
|
||||
framework.Failf("RethinkDB console is not running")
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
framework.KubeDescribe("Hazelcast", func() {
|
||||
It("should create and scale hazelcast", func() {
|
||||
mkpath := func(file string) string {
|
||||
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/hazelcast", file)
|
||||
}
|
||||
serviceYaml := mkpath("hazelcast-service.yaml")
|
||||
deploymentYaml := mkpath("hazelcast-deployment.yaml")
|
||||
nsFlag := fmt.Sprintf("--namespace=%v", ns)
|
||||
|
||||
By("starting hazelcast")
|
||||
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
|
||||
framework.RunKubectlOrDie("create", "-f", deploymentYaml, nsFlag)
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"}))
|
||||
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
forEachPod("name", "hazelcast", func(pod v1.Pod) {
|
||||
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
_, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
err = framework.WaitForEndpoint(c, ns, "hazelcast")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("scaling hazelcast")
|
||||
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true)
|
||||
forEachPod("name", "hazelcast", func(pod v1.Pod) {
|
||||
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func makeHttpRequestToService(c clientset.Interface, ns, service, path string, timeout time.Duration) (string, error) {
|
||||
var result []byte
|
||||
var err error
|
||||
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
|
||||
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
|
||||
if errProxy != nil {
|
||||
break
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
result, err = proxyRequest.Namespace(ns).
|
||||
Context(ctx).
|
||||
Name(service).
|
||||
Suffix(path).
|
||||
Do().
|
||||
Raw()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return string(result), err
|
||||
}
|
||||
|
||||
func createFileForGoBinData(gobindataPath, outputFilename string) error {
|
||||
data := generated.ReadOrDie(gobindataPath)
|
||||
if len(data) == 0 {
|
||||
return fmt.Errorf("Failed to read gobindata from %v", gobindataPath)
|
||||
}
|
||||
fullPath := filepath.Join(framework.TestContext.OutputDir, outputFilename)
|
||||
err := os.MkdirAll(filepath.Dir(fullPath), 0777)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error while creating directory %v: %v", filepath.Dir(fullPath), err)
|
||||
}
|
||||
err = ioutil.WriteFile(fullPath, data, 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error while trying to write to file %v: %v", fullPath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
167
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"authorizer_util.go",
|
||||
"cleanup.go",
|
||||
"deployment_util.go",
|
||||
"exec_util.go",
|
||||
"firewall_util.go",
|
||||
"framework.go",
|
||||
"get-kubemark-resource-usage.go",
|
||||
"google_compute.go",
|
||||
"gpu_util.go",
|
||||
"ingress_utils.go",
|
||||
"jobs_util.go",
|
||||
"kubelet_stats.go",
|
||||
"log_size_monitoring.go",
|
||||
"metrics_util.go",
|
||||
"networking_utils.go",
|
||||
"nodes_util.go",
|
||||
"perf_util.go",
|
||||
"pods.go",
|
||||
"psp_util.go",
|
||||
"pv_util.go",
|
||||
"rc_util.go",
|
||||
"resource_usage_gatherer.go",
|
||||
"rs_util.go",
|
||||
"service_util.go",
|
||||
"size.go",
|
||||
"statefulset_utils.go",
|
||||
"test_context.go",
|
||||
"upgrade_util.go",
|
||||
"util.go",
|
||||
"volume_util.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework",
|
||||
deps = [
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/api/testapi:go_default_library",
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/apis/apps:go_default_library",
|
||||
"//pkg/apis/batch:go_default_library",
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/apis/core/v1/helper:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/client/conditions:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers/aws:go_default_library",
|
||||
"//pkg/cloudprovider/providers/azure:go_default_library",
|
||||
"//pkg/cloudprovider/providers/gce:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/deployment/util:go_default_library",
|
||||
"//pkg/controller/node:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/kubectl:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
|
||||
"//pkg/kubelet/dockershim/metrics:go_default_library",
|
||||
"//pkg/kubelet/events:go_default_library",
|
||||
"//pkg/kubelet/metrics:go_default_library",
|
||||
"//pkg/kubelet/sysctl:go_default_library",
|
||||
"//pkg/kubelet/util/format:go_default_library",
|
||||
"//pkg/kubemark:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/ssh:go_default_library",
|
||||
"//pkg/util/file:go_default_library",
|
||||
"//pkg/util/system:go_default_library",
|
||||
"//pkg/util/taints:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//pkg/volume/util/volumehelper:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
|
||||
"//plugin/pkg/scheduler/schedulercache:go_default_library",
|
||||
"//test/e2e/framework/ginkgowrapper:go_default_library",
|
||||
"//test/e2e/framework/metrics:go_default_library",
|
||||
"//test/e2e/manifest:go_default_library",
|
||||
"//test/e2e/perftype:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega/types:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
"//vendor/github.com/spf13/viper:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/ssh:go_default_library",
|
||||
"//vendor/golang.org/x/net/websocket:go_default_library",
|
||||
"//vendor/google.golang.org/api/compute/v1:go_default_library",
|
||||
"//vendor/google.golang.org/api/googleapi:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
|
||||
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/batch/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/discovery:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/retry:go_default_library",
|
||||
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//test/e2e/framework/ginkgowrapper:all-srcs",
|
||||
"//test/e2e/framework/metrics:all-srcs",
|
||||
"//test/e2e/framework/timer:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
151
vendor/k8s.io/kubernetes/test/e2e/framework/authorizer_util.go
generated
vendored
Normal file
@ -0,0 +1,151 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
|
||||
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
v1beta1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1beta1"
|
||||
v1beta1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
|
||||
)
|
||||
|
||||
const (
|
||||
policyCachePollInterval = 100 * time.Millisecond
|
||||
policyCachePollTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action.
|
||||
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
|
||||
func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
|
||||
return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed)
|
||||
}
|
||||
|
||||
// WaitForNamedAuthorizationUpdate checks whether the given user can perform the named verb and action on the named resource.
|
||||
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
|
||||
func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error {
|
||||
review := &authorizationv1beta1.SubjectAccessReview{
|
||||
Spec: authorizationv1beta1.SubjectAccessReviewSpec{
|
||||
ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
|
||||
Group: resource.Group,
|
||||
Verb: verb,
|
||||
Resource: resource.Resource,
|
||||
Namespace: namespace,
|
||||
Name: resourceName,
|
||||
},
|
||||
User: user,
|
||||
},
|
||||
}
|
||||
err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
|
||||
response, err := c.SubjectAccessReviews().Create(review)
|
||||
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
|
||||
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
|
||||
if apierrors.IsNotFound(err) {
|
||||
fmt.Printf("SubjectAccessReview endpoint is missing\n")
|
||||
time.Sleep(1 * time.Second)
|
||||
return true, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if response.Status.Allowed != allowed {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
return err
|
||||
}
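// A minimal usage sketch, assuming c is a client-go clientset; the namespace,
// verb and resource name below are illustrative: poll until the policy cache
// reflects a grant on a single named resource before exercising it.
//
//   err := WaitForNamedAuthorizationUpdate(c.AuthorizationV1beta1(),
//       "system:serviceaccount:test-ns:default",
//       "test-ns", "get", "my-config",
//       schema.GroupResource{Resource: "configmaps"}, true)
//   ExpectNoError(err)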
|
||||
|
||||
// BindClusterRole binds the cluster role at the cluster scope
|
||||
func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
|
||||
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
|
||||
_, err := c.ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ns + "--" + clusterRole,
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: clusterRole,
|
||||
},
|
||||
Subjects: subjects,
|
||||
})
|
||||
|
||||
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
|
||||
if err != nil {
|
||||
fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
|
||||
}
|
||||
}
|
||||
|
||||
// BindClusterRoleInNamespace binds the cluster role at the namespace scope
|
||||
func BindClusterRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
|
||||
bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...)
|
||||
}
|
||||
|
||||
// BindRoleInNamespace binds the role at the namespace scope
|
||||
func BindRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) {
|
||||
bindInNamespace(c, "Role", role, ns, subjects...)
|
||||
}
|
||||
|
||||
func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) {
|
||||
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
|
||||
_, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: ns + "--" + role,
|
||||
},
|
||||
RoleRef: rbacv1beta1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: roleType,
|
||||
Name: role,
|
||||
},
|
||||
Subjects: subjects,
|
||||
})
|
||||
|
||||
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
|
||||
if err != nil {
|
||||
fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
isRBACEnabledOnce sync.Once
|
||||
isRBACEnabled bool
|
||||
)
|
||||
|
||||
func IsRBACEnabled(f *Framework) bool {
|
||||
isRBACEnabledOnce.Do(func() {
|
||||
crs, err := f.ClientSet.RbacV1().ClusterRoles().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
|
||||
isRBACEnabled = false
|
||||
} else if crs == nil || len(crs.Items) == 0 {
|
||||
Logf("No ClusteRoles found; assuming RBAC is disabled.")
|
||||
isRBACEnabled = false
|
||||
} else {
|
||||
Logf("Found ClusterRoles; assuming RBAC is enabled.")
|
||||
isRBACEnabled = true
|
||||
}
|
||||
})
|
||||
return isRBACEnabled
|
||||
}
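// A minimal sketch of gating RBAC setup on this check (role and subject names
// are illustrative; f is assumed to be the test Framework):
//
//   if IsRBACEnabled(f) {
//       BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(), "edit", f.Namespace.Name,
//           rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
//   }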
|
61
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import "sync"
|
||||
|
||||
type CleanupActionHandle *int
|
||||
|
||||
var cleanupActionsLock sync.Mutex
|
||||
var cleanupActions = map[CleanupActionHandle]func(){}
|
||||
|
||||
// AddCleanupAction installs a function that will be called in the event of the
|
||||
// whole test being terminated. This allows arbitrary pieces of the overall
|
||||
// test to hook into SynchronizedAfterSuite().
|
||||
func AddCleanupAction(fn func()) CleanupActionHandle {
|
||||
p := CleanupActionHandle(new(int))
|
||||
cleanupActionsLock.Lock()
|
||||
defer cleanupActionsLock.Unlock()
|
||||
cleanupActions[p] = fn
|
||||
return p
|
||||
}
|
||||
|
||||
// RemoveCleanupAction removes a function that was installed by
|
||||
// AddCleanupAction.
|
||||
func RemoveCleanupAction(p CleanupActionHandle) {
|
||||
cleanupActionsLock.Lock()
|
||||
defer cleanupActionsLock.Unlock()
|
||||
delete(cleanupActions, p)
|
||||
}
|
||||
|
||||
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
|
||||
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
|
||||
// may remove themselves.
|
||||
func RunCleanupActions() {
|
||||
list := []func(){}
|
||||
func() {
|
||||
cleanupActionsLock.Lock()
|
||||
defer cleanupActionsLock.Unlock()
|
||||
for _, fn := range cleanupActions {
|
||||
list = append(list, fn)
|
||||
}
|
||||
}()
|
||||
// Run unlocked.
|
||||
for _, fn := range list {
|
||||
fn()
|
||||
}
|
||||
}
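// A minimal usage sketch (deleteLeftovers is a hypothetical helper): register a
// cleanup action when a test creates long-lived resources, and remove it again
// once the test has cleaned up on its own so the suite-level hook does not run it twice.
//
//   handle := AddCleanupAction(func() { deleteLeftovers() })
//   defer RemoveCleanupAction(handle)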
|
297
vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go
generated
vendored
Normal file
@ -0,0 +1,297 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*extensions.Deployment, error) {
|
||||
return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, Logf, Poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
// Waits for the deployment to clean up old rcs.
|
||||
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
|
||||
var oldRSs []*extensions.ReplicaSet
|
||||
var d *extensions.Deployment
|
||||
|
||||
pollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
d = deployment
|
||||
|
||||
_, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.ExtensionsV1beta1())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(oldRSs) == desiredRSNum, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
|
||||
logReplicaSetsOfDeployment(d, oldRSs, nil)
|
||||
}
|
||||
return pollErr
|
||||
}
|
||||
|
||||
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
|
||||
testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
|
||||
}
|
||||
|
||||
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
|
||||
return testutils.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
|
||||
}
|
||||
|
||||
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
|
||||
return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
|
||||
// Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
|
||||
// may take longer to relabel an RS.
|
||||
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
|
||||
return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
func NewDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName, image string, strategyType extensions.DeploymentStrategyType) *extensions.Deployment {
|
||||
zero := int64(0)
|
||||
return &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Selector: &metav1.LabelSelector{MatchLabels: podLabels},
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
Type: strategyType,
|
||||
},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: imageName,
|
||||
Image: image,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
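// A minimal usage sketch (image, labels and namespace are illustrative): build a
// spec with NewDeployment, create it through the clientset, and wait for the
// rollout to complete.
//
//   d := NewDeployment("test-deployment", 2, map[string]string{"app": "test"},
//       "nginx", "nginx", extensions.RollingUpdateDeploymentStrategyType)
//   d, err := c.ExtensionsV1beta1().Deployments(ns).Create(d)
//   ExpectNoError(err)
//   ExpectNoError(WaitForDeploymentComplete(c, d))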
|
||||
|
||||
// Waits for the deployment to complete, and doesn't check whether the rolling update strategy is broken.
|
||||
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
|
||||
// such as shortly after a scaling event or the deployment is just created.
|
||||
func WaitForDeploymentComplete(c clientset.Interface, d *extensions.Deployment) error {
|
||||
return testutils.WaitForDeploymentComplete(c, d, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// Waits for the deployment to complete, and checks that the rolling update strategy isn't broken at any time.
|
||||
// Rolling update strategy should not be broken during a rolling update.
|
||||
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *extensions.Deployment) error {
|
||||
return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, Logf, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas.
|
||||
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
|
||||
return testutils.WaitForDeploymentUpdatedReplicasLTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, Poll, pollLongTimeout)
|
||||
}
|
||||
|
||||
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or no longer needs to roll back.
|
||||
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
|
||||
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
|
||||
return testutils.WaitForDeploymentRollbackCleared(c, ns, deploymentName, Poll, pollShortTimeout)
|
||||
}
|
||||
|
||||
// WatchRecreateDeployment watches Recreate deployments and ensures that no new pods run at the same time as
|
||||
// old pods.
|
||||
func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) error {
|
||||
if d.Spec.Strategy.Type != extensions.RecreateDeploymentStrategyType {
|
||||
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
|
||||
}
|
||||
|
||||
w, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
status := d.Status
|
||||
|
||||
condition := func(event watch.Event) (bool, error) {
|
||||
d := event.Object.(*extensions.Deployment)
|
||||
status = d.Status
|
||||
|
||||
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.ExtensionsV1beta1())
|
||||
newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.ExtensionsV1beta1())
|
||||
if err == nil && nerr == nil {
|
||||
Logf("%+v", d)
|
||||
logReplicaSetsOfDeployment(d, allOldRSs, newRS)
|
||||
logPodsOfDeployment(c, d, append(allOldRSs, newRS))
|
||||
}
|
||||
return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
|
||||
}
|
||||
|
||||
return *(d.Spec.Replicas) == d.Status.Replicas &&
|
||||
*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
|
||||
d.Generation <= d.Status.ObservedGeneration, nil
|
||||
}
|
||||
|
||||
_, err = watch.Until(2*time.Minute, w, condition)
|
||||
if err == wait.ErrWaitTimeout {
|
||||
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
|
||||
}
|
||||
return err
|
||||
}
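// A minimal usage sketch (deployment name and image are illustrative): trigger an
// update on a Recreate-strategy deployment, then fail if old and new pods overlap.
//
//   d, err := UpdateDeploymentWithRetries(c, ns, "test-deployment", func(update *extensions.Deployment) {
//       update.Spec.Template.Spec.Containers[0].Image = "nginx:1.15"
//   })
//   ExpectNoError(err)
//   ExpectNoError(WatchRecreateDeployment(c, d))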
|
||||
|
||||
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
|
||||
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
|
||||
}
|
||||
|
||||
func RunDeployment(config testutils.DeploymentConfig) error {
|
||||
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
|
||||
config.NodeDumpFunc = DumpNodeDebugInfo
|
||||
config.ContainerDumpFunc = LogFailedContainers
|
||||
return testutils.RunDeployment(config)
|
||||
}
|
||||
|
||||
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
|
||||
testutils.LogPodsOfDeployment(c, deployment, rsList, Logf)
|
||||
}
|
||||
|
||||
func WaitForDeploymentRevision(c clientset.Interface, d *extensions.Deployment, targetRevision string) error {
|
||||
err := wait.PollImmediate(Poll, pollLongTimeout, func() (bool, error) {
|
||||
deployment, err := c.ExtensionsV1beta1().Deployments(d.Namespace).Get(d.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
revision := deployment.Annotations[deploymentutil.RevisionAnnotation]
|
||||
return revision == targetRevision, nil
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
|
||||
func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
|
||||
return testutils.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
|
||||
}
|
||||
|
||||
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*extensions.Deployment, error) {
|
||||
deploymentSpec := MakeDeployment(replicas, podLabels, namespace, pvclaims, false, command)
|
||||
deployment, err := client.Extensions().Deployments(namespace).Create(deploymentSpec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
|
||||
}
|
||||
Logf("Waiting deployment %q to complete", deploymentSpec.Name)
|
||||
err = WaitForDeploymentComplete(client, deployment)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
|
||||
}
|
||||
return deployment, nil
|
||||
}
|
||||
|
||||
// MakeDeployment creates a deployment definition based on the namespace. The deployment references the given PVCs by
|
||||
// name. A BASH command string can be supplied and is run by the pod's container via /bin/sh -c.
|
||||
func MakeDeployment(replicas int32, podLabels map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *extensions.Deployment {
|
||||
if len(command) == 0 {
|
||||
command = "while true; do sleep 1; done"
|
||||
}
|
||||
zero := int64(0)
|
||||
deploymentName := "deployment-" + string(uuid.NewUUID())
|
||||
deploymentSpec := &extensions.Deployment{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: deploymentName,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: podLabels,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "write-pod",
|
||||
Image: "busybox",
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", command},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &isPrivileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyAlways,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
|
||||
var volumes = make([]v1.Volume, len(pvclaims))
|
||||
for index, pvclaim := range pvclaims {
|
||||
volumename := fmt.Sprintf("volume%v", index+1)
|
||||
volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
|
||||
volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
|
||||
}
|
||||
deploymentSpec.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts
|
||||
deploymentSpec.Spec.Template.Spec.Volumes = volumes
|
||||
return deploymentSpec
|
||||
}
|
||||
|
||||
// GetPodsForDeployment gets pods for the given deployment
|
||||
func GetPodsForDeployment(client clientset.Interface, deployment *extensions.Deployment) (*v1.PodList, error) {
|
||||
replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.ExtensionsV1beta1())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get new replica set for deployment %q: %v", deployment.Name, err)
|
||||
}
|
||||
if replicaSet == nil {
|
||||
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
|
||||
}
|
||||
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
|
||||
return client.Core().Pods(namespace).List(options)
|
||||
}
|
||||
rsList := []*extensions.ReplicaSet{replicaSet}
|
||||
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to list Pods of Deployment %q: %v", deployment.Name, err)
|
||||
}
|
||||
return podList, nil
|
||||
}
|
147
vendor/k8s.io/kubernetes/test/e2e/framework/exec_util.go
generated
vendored
Normal file
@ -0,0 +1,147 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// ExecOptions passed to ExecWithOptions
|
||||
type ExecOptions struct {
|
||||
Command []string
|
||||
|
||||
Namespace string
|
||||
PodName string
|
||||
ContainerName string
|
||||
|
||||
Stdin io.Reader
|
||||
CaptureStdout bool
|
||||
CaptureStderr bool
|
||||
// If false, leading and trailing whitespace in std{err,out} will be trimmed.
|
||||
PreserveWhitespace bool
|
||||
}
|
||||
|
||||
// ExecWithOptions executes a command in the specified container,
|
||||
// returning stdout, stderr and error. `options` allows
|
||||
// additional parameters to be passed.
|
||||
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
|
||||
Logf("ExecWithOptions %+v", options)
|
||||
|
||||
config, err := LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to load restclient config")
|
||||
|
||||
const tty = false
|
||||
|
||||
req := f.ClientSet.CoreV1().RESTClient().Post().
|
||||
Resource("pods").
|
||||
Name(options.PodName).
|
||||
Namespace(options.Namespace).
|
||||
SubResource("exec").
|
||||
Param("container", options.ContainerName)
|
||||
req.VersionedParams(&v1.PodExecOptions{
|
||||
Container: options.ContainerName,
|
||||
Command: options.Command,
|
||||
Stdin: options.Stdin != nil,
|
||||
Stdout: options.CaptureStdout,
|
||||
Stderr: options.CaptureStderr,
|
||||
TTY: tty,
|
||||
}, legacyscheme.ParameterCodec)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
|
||||
|
||||
if options.PreserveWhitespace {
|
||||
return stdout.String(), stderr.String(), err
|
||||
}
|
||||
return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
|
||||
}
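// A minimal usage sketch (pod, container and command names are illustrative):
// capture the stdout of a command run in a specific container of a running pod.
//
//   stdout, stderr, err := f.ExecWithOptions(ExecOptions{
//       Command:       []string{"cat", "/etc/resolv.conf"},
//       Namespace:     f.Namespace.Name,
//       PodName:       "test-pod",
//       ContainerName: "test-container",
//       CaptureStdout: true,
//       CaptureStderr: true,
//   })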
|
||||
|
||||
// ExecCommandInContainerWithFullOutput executes a command in the
|
||||
// specified container and returns stdout, stderr and error
|
||||
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
|
||||
return f.ExecWithOptions(ExecOptions{
|
||||
Command: cmd,
|
||||
Namespace: f.Namespace.Name,
|
||||
PodName: podName,
|
||||
ContainerName: containerName,
|
||||
|
||||
Stdin: nil,
|
||||
CaptureStdout: true,
|
||||
CaptureStderr: true,
|
||||
PreserveWhitespace: false,
|
||||
})
|
||||
}
|
||||
|
||||
// ExecCommandInContainer executes a command in the specified container.
|
||||
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
|
||||
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
|
||||
Logf("Exec stderr: %q", stderr)
|
||||
Expect(err).NotTo(HaveOccurred(),
|
||||
"failed to execute command in pod %v, container %v: %v",
|
||||
podName, containerName, err)
|
||||
return stdout
|
||||
}
|
||||
|
||||
func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
|
||||
return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
|
||||
}
|
||||
|
||||
func (f *Framework) ExecCommandInPod(podName string, cmd ...string) string {
|
||||
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get pod")
|
||||
Expect(pod.Spec.Containers).NotTo(BeEmpty())
|
||||
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
|
||||
}
|
||||
|
||||
func (f *Framework) ExecCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
|
||||
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get pod")
|
||||
Expect(pod.Spec.Containers).NotTo(BeEmpty())
|
||||
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
|
||||
}
|
||||
|
||||
func (f *Framework) ExecShellInPod(podName string, cmd string) string {
|
||||
return f.ExecCommandInPod(podName, "/bin/sh", "-c", cmd)
|
||||
}
|
||||
|
||||
func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
|
||||
return f.ExecCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
|
||||
}
|
||||
|
||||
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
|
||||
exec, err := remotecommand.NewSPDYExecutor(config, method, url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return exec.Stream(remotecommand.StreamOptions{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
Tty: tty,
|
||||
})
|
||||
}
|
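The helpers above are normally invoked from a running e2e spec that already owns a Framework instance. The following is an editorial sketch of how they might be called; the pod and container names are hypothetical and the surrounding Ginkgo suite is assumed to exist.

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// execInDemoPod is an illustrative helper, not part of the vendored sources.
// It shows both the convenience form and the full ExecOptions form.
func execInDemoPod(f *framework.Framework) {
	// Convenience form: fails the test on error and returns trimmed stdout.
	hostname := f.ExecCommandInContainer("demo-pod", "demo-container", "hostname")
	framework.Logf("container reports hostname %q", hostname)

	// Full form: the caller inspects stdout, stderr and the error itself.
	stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
		Command:       []string{"cat", "/etc/resolv.conf"},
		Namespace:     f.Namespace.Name,
		PodName:       "demo-pod",
		ContainerName: "demo-container",
		CaptureStdout: true,
		CaptureStderr: true,
	})
	framework.Logf("stdout=%q stderr=%q err=%v", stdout, stderr, err)
}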
389 vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go generated vendored Normal file
@@ -0,0 +1,389 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/cloudprovider"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

	. "github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
)

const (
	FirewallTimeoutDefault = 3 * time.Minute
	FirewallTestTcpTimeout = time.Duration(1 * time.Second)
	// Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster
	FirewallTestHttpPort = int32(29999)
	FirewallTestUdpPort  = int32(29998)
)

// MakeFirewallNameForLBService returns the expected firewall name for a LB service.
// This should match the formatting of makeFirewallName() in pkg/cloudprovider/providers/gce/gce_loadbalancer.go
func MakeFirewallNameForLBService(name string) string {
	return fmt.Sprintf("k8s-fw-%s", name)
}

// ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
		Failf("can not construct firewall rule for non-loadbalancer type service")
	}
	fw := compute.Firewall{}
	fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
	fw.TargetTags = []string{nodeTag}
	if svc.Spec.LoadBalancerSourceRanges == nil {
		fw.SourceRanges = []string{"0.0.0.0/0"}
	} else {
		fw.SourceRanges = svc.Spec.LoadBalancerSourceRanges
	}
	for _, sp := range svc.Spec.Ports {
		fw.Allowed = append(fw.Allowed, &compute.FirewallAllowed{
			IPProtocol: strings.ToLower(string(sp.Protocol)),
			Ports:      []string{strconv.Itoa(int(sp.Port))},
		})
	}
	return &fw
}

func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHealthCheck bool) string {
	return gcecloud.MakeHealthCheckFirewallName(clusterID, name, isNodesHealthCheck)
}

// ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
		Failf("can not construct firewall rule for non-loadbalancer type service")
	}
	fw := compute.Firewall{}
	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
	fw.TargetTags = []string{nodeTag}
	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
	healthCheckPort := gcecloud.GetNodesHealthCheckPort()
	if !isNodesHealthCheck {
		healthCheckPort = svc.Spec.HealthCheckNodePort
	}
	fw.Allowed = []*compute.FirewallAllowed{
		{
			IPProtocol: "tcp",
			Ports:      []string{fmt.Sprintf("%d", healthCheckPort)},
		},
	}
	return &fw
}

// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
	res, err := gceCloud.GetComputeService().Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
		instanceName).Do()
	if err != nil {
		Failf("Failed to get instance tags for %v: %v", instanceName, err)
	}
	return res.Tags
}

// SetInstanceTags sets tags on GCE instance with given name.
func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string {
	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
	// Re-get instance every time because we need the latest fingerprint for updating metadata
	resTags := GetInstanceTags(cloudConfig, instanceName)
	_, err := gceCloud.GetComputeService().Instances.SetTags(
		cloudConfig.ProjectID, zone, instanceName,
		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
	if err != nil {
		Failf("failed to set instance tags: %v", err)
	}
	Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
	return resTags.Items
}

// GetNodeTags gets k8s node tag from one of the nodes
func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) []string {
	nodes := GetReadySchedulableNodesOrDie(c)
	if len(nodes.Items) == 0 {
		Logf("GetNodeTags: Found 0 node.")
		return []string{}
	}
	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
}

// GetInstancePrefix returns the INSTANCE_PREFIX env we set for e2e cluster.
// From cluster/gce/config-test.sh, master name is set up using below format:
// MASTER_NAME="${INSTANCE_PREFIX}-master"
func GetInstancePrefix(masterName string) (string, error) {
	if !strings.HasSuffix(masterName, "-master") {
		return "", fmt.Errorf("unexpected master name format: %v", masterName)
	}
	return masterName[:len(masterName)-7], nil
}

// GetClusterName returns the CLUSTER_NAME env we set for e2e cluster.
// From cluster/gce/config-test.sh, cluster name is set up using below format:
// CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
func GetClusterName(instancePrefix string) string {
	return instancePrefix
}

// GetE2eFirewalls returns all firewall rules we create for an e2e cluster.
// From cluster/gce/util.sh, all firewall rules should be consistent with the ones created by startup scripts.
func GetE2eFirewalls(masterName, masterTag, nodeTag, network, clusterIpRange string) []*compute.Firewall {
	instancePrefix, err := GetInstancePrefix(masterName)
	Expect(err).NotTo(HaveOccurred())
	clusterName := GetClusterName(instancePrefix)

	fws := []*compute.Firewall{}
	fws = append(fws, &compute.Firewall{
		Name:         clusterName + "-default-internal-master",
		SourceRanges: []string{"10.0.0.0/8"},
		TargetTags:   []string{masterTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"1-2379"},
			},
			{
				IPProtocol: "tcp",
				Ports:      []string{"2382-65535"},
			},
			{
				IPProtocol: "udp",
				Ports:      []string{"1-65535"},
			},
			{
				IPProtocol: "icmp",
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         clusterName + "-default-internal-node",
		SourceRanges: []string{"10.0.0.0/8"},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"1-65535"},
			},
			{
				IPProtocol: "udp",
				Ports:      []string{"1-65535"},
			},
			{
				IPProtocol: "icmp",
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         network + "-default-ssh",
		SourceRanges: []string{"0.0.0.0/0"},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"22"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:       masterName + "-etcd",
		SourceTags: []string{masterTag},
		TargetTags: []string{masterTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"2380"},
			},
			{
				IPProtocol: "tcp",
				Ports:      []string{"2381"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         masterName + "-https",
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{masterTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"443"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         nodeTag + "-all",
		SourceRanges: []string{clusterIpRange},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
			},
			{
				IPProtocol: "udp",
			},
			{
				IPProtocol: "icmp",
			},
			{
				IPProtocol: "esp",
			},
			{
				IPProtocol: "ah",
			},
			{
				IPProtocol: "sctp",
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         nodeTag + "-" + instancePrefix + "-http-alt",
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"80"},
			},
			{
				IPProtocol: "tcp",
				Ports:      []string{"8080"},
			},
		},
	})
	fws = append(fws, &compute.Firewall{
		Name:         nodeTag + "-" + instancePrefix + "-nodeports",
		SourceRanges: []string{"0.0.0.0/0"},
		TargetTags:   []string{nodeTag},
		Allowed: []*compute.FirewallAllowed{
			{
				IPProtocol: "tcp",
				Ports:      []string{"30000-32767"},
			},
			{
				IPProtocol: "udp",
				Ports:      []string{"30000-32767"},
			},
		},
	})
	return fws
}

// PackProtocolsPortsFromFirewall packs protocols and ports in a unified way for verification.
func PackProtocolsPortsFromFirewall(alloweds []*compute.FirewallAllowed) []string {
	protocolPorts := []string{}
	for _, allowed := range alloweds {
		for _, port := range allowed.Ports {
			protocolPorts = append(protocolPorts, strings.ToLower(allowed.IPProtocol+"/"+port))
		}
	}
	return protocolPorts
}

// SameStringArray verifies whether two string arrays have the same strings, returns an error if not.
// Order does not matter.
// When `include` is set to true, verifies whether result includes all elements from expected.
func SameStringArray(result, expected []string, include bool) error {
	res := sets.NewString(result...)
	exp := sets.NewString(expected...)
	if !include {
		diff := res.Difference(exp)
		if len(diff) != 0 {
			return fmt.Errorf("found differences: %v", diff)
		}
	} else {
		if !res.IsSuperset(exp) {
			return fmt.Errorf("some elements are missing: expected %v, got %v", expected, result)
		}
	}
	return nil
}

// VerifyFirewallRule verifies whether the result firewall is consistent with the expected firewall.
// When `portsSubset` is false, match given ports exactly. Otherwise, only check ports are included.
func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset bool) error {
	if res == nil || exp == nil {
		return fmt.Errorf("res and exp must not be nil")
	}
	if res.Name != exp.Name {
		return fmt.Errorf("incorrect name: %v, expected %v", res.Name, exp.Name)
	}
	// Sample Network value: https://www.googleapis.com/compute/v1/projects/{project-id}/global/networks/e2e
	if !strings.HasSuffix(res.Network, "/"+network) {
		return fmt.Errorf("incorrect network: %v, expected ends with: %v", res.Network, "/"+network)
	}
	if err := SameStringArray(PackProtocolsPortsFromFirewall(res.Allowed),
		PackProtocolsPortsFromFirewall(exp.Allowed), portsSubset); err != nil {
		return fmt.Errorf("incorrect allowed protocols ports: %v", err)
	}
	if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
		return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
	}
	if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
		return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err)
	}
	if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
		return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err)
	}
	return nil
}

func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
	Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
	var fw *compute.Firewall
	var err error

	condition := func() (bool, error) {
		fw, err = gceCloud.GetFirewall(fwName)
		if err != nil && exist ||
			err == nil && !exist ||
			err != nil && !exist && !IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
			return false, nil
		}
		return true, nil
	}

	if err := wait.PollImmediate(5*time.Second, timeout, condition); err != nil {
		return nil, fmt.Errorf("error waiting for firewall %v exist=%v", fwName, exist)
	}
	return fw, nil
}

func GetClusterID(c clientset.Interface) (string, error) {
	cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
	if err != nil || cm == nil {
		return "", fmt.Errorf("error getting cluster ID: %v", err)
	}
	clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
	providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
	if !clusterIDExists {
		return "", fmt.Errorf("cluster ID not set")
	}
	if providerIDExists {
		return providerID, nil
	}
	return clusterID, nil
}
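An editorial sketch of how these firewall helpers are meant to compose in a LoadBalancer Service test follows; svc, gceCloud, nodeTag and network are assumed to be supplied by the surrounding test, and the flow is illustrative rather than the canonical upstream test.

package example

import (
	"k8s.io/api/core/v1"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

	"k8s.io/kubernetes/test/e2e/framework"
)

// checkLBFirewall builds the expected rule for a LoadBalancer Service, waits for
// the cloud provider to create it, and verifies that the created rule matches.
func checkLBFirewall(svc *v1.Service, gceCloud *gcecloud.GCECloud, nodeTag, network string) error {
	expected := framework.ConstructFirewallForLBService(svc, nodeTag)

	// Wait until the rule actually exists before comparing.
	actual, err := framework.WaitForFirewallRule(gceCloud, expected.Name, true, framework.FirewallTimeoutDefault)
	if err != nil {
		return err
	}

	// portsSubset=false: the rule must expose exactly the Service ports.
	return framework.VerifyFirewallRule(actual, expected, network, false)
}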
835 vendor/k8s.io/kubernetes/test/e2e/framework/framework.go generated vendored Normal file
@@ -0,0 +1,835 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/kubemark"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
const (
|
||||
maxKubectlExecRetries = 5
|
||||
// TODO(mikedanese): reset this to 5 minutes once #47135 is resolved.
|
||||
// ref https://github.com/kubernetes/kubernetes/issues/47135
|
||||
DefaultNamespaceDeletionTimeout = 10 * time.Minute
|
||||
)
|
||||
|
||||
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
|
||||
// Eventual goal is to merge this with integration test framework.
|
||||
type Framework struct {
|
||||
BaseName string
|
||||
|
||||
ClientSet clientset.Interface
|
||||
KubemarkExternalClusterClientSet clientset.Interface
|
||||
|
||||
InternalClientset *internalclientset.Clientset
|
||||
AggregatorClient *aggregatorclient.Clientset
|
||||
ClientPool dynamic.ClientPool
|
||||
|
||||
SkipNamespaceCreation bool // Whether to skip creating a namespace
|
||||
Namespace *v1.Namespace // Every test has at least one namespace unless creation is skipped
|
||||
namespacesToDelete []*v1.Namespace // Some tests have more than one.
|
||||
NamespaceDeletionTimeout time.Duration
|
||||
SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace
|
||||
|
||||
gatherer *containerResourceGatherer
|
||||
// Constraints that passed to a check which is executed after data is gathered to
|
||||
// see if 99% of results are within acceptable bounds. It has to be injected in the test,
|
||||
// as expectations vary greatly. Constraints are grouped by the container names.
|
||||
AddonResourceConstraints map[string]ResourceConstraint
|
||||
|
||||
logsSizeWaitGroup sync.WaitGroup
|
||||
logsSizeCloseChannel chan bool
|
||||
logsSizeVerifier *LogsSizeVerifier
|
||||
|
||||
// To make sure that this framework cleans up after itself, no matter what,
|
||||
// we install a Cleanup action before each test and clear it after. If we
|
||||
// should abort, the AfterSuite hook should run all Cleanup actions.
|
||||
cleanupHandle CleanupActionHandle
|
||||
|
||||
// configuration for framework's client
|
||||
Options FrameworkOptions
|
||||
|
||||
// Place where various additional data is stored during test run to be printed to ReportDir,
|
||||
// or stdout if ReportDir is not set once test ends.
|
||||
TestSummaries []TestDataSummary
|
||||
|
||||
kubemarkControllerCloseChannel chan struct{}
|
||||
|
||||
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
|
||||
clusterAutoscalerMetricsBeforeTest metrics.MetricsCollection
|
||||
}
|
||||
|
||||
type TestDataSummary interface {
|
||||
SummaryKind() string
|
||||
PrintHumanReadable() string
|
||||
PrintJSON() string
|
||||
}
|
||||
|
||||
type FrameworkOptions struct {
|
||||
ClientQPS float32
|
||||
ClientBurst int
|
||||
GroupVersion *schema.GroupVersion
|
||||
}
|
||||
|
||||
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
|
||||
// you (you can write additional before/after each functions).
|
||||
func NewDefaultFramework(baseName string) *Framework {
|
||||
options := FrameworkOptions{
|
||||
ClientQPS: 20,
|
||||
ClientBurst: 50,
|
||||
}
|
||||
return NewFramework(baseName, options, nil)
|
||||
}
|
||||
|
||||
func NewFramework(baseName string, options FrameworkOptions, client clientset.Interface) *Framework {
|
||||
f := &Framework{
|
||||
BaseName: baseName,
|
||||
AddonResourceConstraints: make(map[string]ResourceConstraint),
|
||||
Options: options,
|
||||
ClientSet: client,
|
||||
}
|
||||
|
||||
BeforeEach(f.BeforeEach)
|
||||
AfterEach(f.AfterEach)
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// BeforeEach gets a client and makes a namespace.
|
||||
func (f *Framework) BeforeEach() {
|
||||
// The fact that we need this feels like a bug in ginkgo.
|
||||
// https://github.com/onsi/ginkgo/issues/222
|
||||
f.cleanupHandle = AddCleanupAction(f.AfterEach)
|
||||
if f.ClientSet == nil {
|
||||
By("Creating a kubernetes client")
|
||||
config, err := LoadConfig()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
config.QPS = f.Options.ClientQPS
|
||||
config.Burst = f.Options.ClientBurst
|
||||
if f.Options.GroupVersion != nil {
|
||||
config.GroupVersion = f.Options.GroupVersion
|
||||
}
|
||||
if TestContext.KubeAPIContentType != "" {
|
||||
config.ContentType = TestContext.KubeAPIContentType
|
||||
}
|
||||
f.ClientSet, err = clientset.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.InternalClientset, err = internalclientset.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.ClientPool = dynamic.NewClientPool(config, legacyscheme.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
|
||||
if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
|
||||
externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
|
||||
externalConfig.QPS = f.Options.ClientQPS
|
||||
externalConfig.Burst = f.Options.ClientBurst
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
externalClient, err := clientset.NewForConfig(externalConfig)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
f.KubemarkExternalClusterClientSet = externalClient
|
||||
f.kubemarkControllerCloseChannel = make(chan struct{})
|
||||
externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
|
||||
kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
|
||||
kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
|
||||
go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
|
||||
TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(f.KubemarkExternalClusterClientSet, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
|
||||
Expect(TestContext.CloudConfig.KubemarkController.WaitForCacheSync(f.kubemarkControllerCloseChannel)).To(BeTrue())
|
||||
go TestContext.CloudConfig.KubemarkController.Run(f.kubemarkControllerCloseChannel)
|
||||
}
|
||||
}
|
||||
|
||||
if !f.SkipNamespaceCreation {
|
||||
By("Building a namespace api object")
|
||||
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
|
||||
"e2e-framework": f.BaseName,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
f.Namespace = namespace
|
||||
|
||||
if TestContext.VerifyServiceAccount {
|
||||
By("Waiting for a default service account to be provisioned in namespace")
|
||||
err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
} else {
|
||||
Logf("Skipping waiting for service account")
|
||||
}
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
|
||||
var err error
|
||||
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
|
||||
InKubemark: ProviderIs("kubemark"),
|
||||
MasterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
|
||||
ResourceDataGatheringPeriod: 60 * time.Second,
|
||||
ProbeDuration: 15 * time.Second,
|
||||
PrintVerboseLogs: false,
|
||||
}, nil)
|
||||
if err != nil {
|
||||
Logf("Error while creating NewResourceUsageGatherer: %v", err)
|
||||
} else {
|
||||
go f.gatherer.StartGatheringData()
|
||||
}
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
f.logsSizeWaitGroup = sync.WaitGroup{}
|
||||
f.logsSizeWaitGroup.Add(1)
|
||||
f.logsSizeCloseChannel = make(chan bool)
|
||||
f.logsSizeVerifier = NewLogsVerifier(f.ClientSet, f.logsSizeCloseChannel)
|
||||
go func() {
|
||||
f.logsSizeVerifier.Run()
|
||||
f.logsSizeWaitGroup.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
gatherMetricsAfterTest := TestContext.GatherMetricsAfterTest == "true" || TestContext.GatherMetricsAfterTest == "master"
|
||||
if gatherMetricsAfterTest && TestContext.IncludeClusterAutoscalerMetrics {
|
||||
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, !ProviderIs("kubemark"), false, false, false, TestContext.IncludeClusterAutoscalerMetrics)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping ClusterAutoscaler metrics gathering before test): %v", err)
|
||||
} else {
|
||||
f.clusterAutoscalerMetricsBeforeTest, err = grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab CA metrics before test (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
Logf("Gathered ClusterAutoscaler metrics before test")
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// AfterEach deletes the namespace, after reading its events.
|
||||
func (f *Framework) AfterEach() {
|
||||
RemoveCleanupAction(f.cleanupHandle)
|
||||
|
||||
// DeleteNamespace at the very end in defer, to avoid any
|
||||
// expectation failures preventing deleting the namespace.
|
||||
defer func() {
|
||||
nsDeletionErrors := map[string]error{}
|
||||
// Whether to delete namespace is determined by 3 factors: delete-namespace flag, delete-namespace-on-failure flag and the test result
|
||||
// if delete-namespace set to false, namespace will always be preserved.
|
||||
// if delete-namespace is true and delete-namespace-on-failure is false, namespace will be preserved if test failed.
|
||||
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !CurrentGinkgoTestDescription().Failed) {
|
||||
for _, ns := range f.namespacesToDelete {
|
||||
By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
|
||||
timeout := DefaultNamespaceDeletionTimeout
|
||||
if f.NamespaceDeletionTimeout != 0 {
|
||||
timeout = f.NamespaceDeletionTimeout
|
||||
}
|
||||
if err := deleteNS(f.ClientSet, f.ClientPool, ns.Name, timeout); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
nsDeletionErrors[ns.Name] = err
|
||||
} else {
|
||||
Logf("Namespace %v was already deleted", ns.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !TestContext.DeleteNamespace {
|
||||
Logf("Found DeleteNamespace=false, skipping namespace deletion!")
|
||||
} else {
|
||||
Logf("Found DeleteNamespaceOnFailure=false and current test failed, skipping namespace deletion!")
|
||||
}
|
||||
}
|
||||
|
||||
// Paranoia-- prevent reuse!
|
||||
f.Namespace = nil
|
||||
f.ClientSet = nil
|
||||
f.namespacesToDelete = nil
|
||||
|
||||
// if we had errors deleting, report them now.
|
||||
if len(nsDeletionErrors) != 0 {
|
||||
messages := []string{}
|
||||
for namespaceKey, namespaceErr := range nsDeletionErrors {
|
||||
messages = append(messages, fmt.Sprintf("Couldn't delete ns: %q: %s (%#v)", namespaceKey, namespaceErr, namespaceErr))
|
||||
}
|
||||
Failf(strings.Join(messages, ","))
|
||||
}
|
||||
}()
|
||||
|
||||
// Print events if the test failed.
|
||||
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
|
||||
// Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client.
|
||||
if !f.SkipNamespaceCreation {
|
||||
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
|
||||
}
|
||||
|
||||
logFunc := Logf
|
||||
if TestContext.ReportDir != "" {
|
||||
filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
|
||||
file, err := os.Create(filePath)
|
||||
if err != nil {
|
||||
By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
|
||||
} else {
|
||||
By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
|
||||
defer file.Close()
|
||||
if err = file.Chmod(0644); err != nil {
|
||||
Logf("Failed to chmod to 644 of %v: %v", filePath, err)
|
||||
}
|
||||
logFunc = GetLogToFileFunc(file)
|
||||
}
|
||||
} else {
|
||||
By("Dumping a list of prepulled images on each node...")
|
||||
}
|
||||
LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
|
||||
}
|
||||
|
||||
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
|
||||
By("Collecting resource usage data")
|
||||
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)
|
||||
defer ExpectNoError(resourceViolationError)
|
||||
f.TestSummaries = append(f.TestSummaries, summary)
|
||||
}
|
||||
|
||||
if TestContext.GatherLogsSizes {
|
||||
By("Gathering log sizes data")
|
||||
close(f.logsSizeCloseChannel)
|
||||
f.logsSizeWaitGroup.Wait()
|
||||
f.TestSummaries = append(f.TestSummaries, f.logsSizeVerifier.GetSummary())
|
||||
}
|
||||
|
||||
if TestContext.GatherMetricsAfterTest != "false" {
|
||||
By("Gathering metrics")
|
||||
// Grab apiserver, scheduler, controller-manager metrics and (optionally) nodes' kubelet metrics.
|
||||
grabMetricsFromKubelets := TestContext.GatherMetricsAfterTest != "master" && !ProviderIs("kubemark")
|
||||
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, grabMetricsFromKubelets, true, true, true, TestContext.IncludeClusterAutoscalerMetrics)
|
||||
if err != nil {
|
||||
Logf("Failed to create MetricsGrabber (skipping metrics gathering): %v", err)
|
||||
} else {
|
||||
received, err := grabber.Grab()
|
||||
if err != nil {
|
||||
Logf("MetricsGrabber failed to grab some of the metrics: %v", err)
|
||||
}
|
||||
(*MetricsForE2E)(&received).computeClusterAutoscalerMetricsDelta(f.clusterAutoscalerMetricsBeforeTest)
|
||||
f.TestSummaries = append(f.TestSummaries, (*MetricsForE2E)(&received))
|
||||
}
|
||||
}
|
||||
|
||||
if TestContext.CloudConfig.KubemarkController != nil {
|
||||
close(f.kubemarkControllerCloseChannel)
|
||||
}
|
||||
|
||||
PrintSummaries(f.TestSummaries, f.BaseName)
|
||||
|
||||
// Check whether all nodes are ready after the test.
|
||||
// This is explicitly done at the very end of the test, to avoid
|
||||
// e.g. not removing namespace in case of this failure.
|
||||
if err := AllNodesReady(f.ClientSet, 3*time.Minute); err != nil {
|
||||
Failf("All nodes should be ready after test, %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
|
||||
createTestingNS := TestContext.CreateTestingNS
|
||||
if createTestingNS == nil {
|
||||
createTestingNS = CreateTestingNS
|
||||
}
|
||||
ns, err := createTestingNS(baseName, f.ClientSet, labels)
|
||||
// check ns instead of err to see if it's nil as we may
|
||||
// fail to create serviceAccount in it.
|
||||
// In this case, we should not forget to delete the namespace.
|
||||
if ns != nil {
|
||||
f.namespacesToDelete = append(f.namespacesToDelete, ns)
|
||||
}
|
||||
|
||||
if !f.SkipPrivilegedPSPBinding {
|
||||
CreatePrivilegedPSPBinding(f, ns.Name)
|
||||
}
|
||||
|
||||
return ns, err
|
||||
}
|
||||
|
||||
// WaitForPodTerminated waits for the pod to be terminated with the given reason.
|
||||
func (f *Framework) WaitForPodTerminated(podName, reason string) error {
|
||||
return waitForPodTerminatedInNamespace(f.ClientSet, podName, reason, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// WaitForPodNotFound waits for the pod to be completely terminated (not "Get-able").
|
||||
func (f *Framework) WaitForPodNotFound(podName string, timeout time.Duration) error {
|
||||
return waitForPodNotFoundInNamespace(f.ClientSet, podName, f.Namespace.Name, timeout)
|
||||
}
|
||||
|
||||
// WaitForPodRunning waits for the pod to run in the namespace.
|
||||
func (f *Framework) WaitForPodRunning(podName string) error {
|
||||
return WaitForPodNameRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// WaitForPodReady waits for the pod to flip to ready in the namespace.
|
||||
func (f *Framework) WaitForPodReady(podName string) error {
|
||||
return waitTimeoutForPodReadyInNamespace(f.ClientSet, podName, f.Namespace.Name, PodStartTimeout)
|
||||
}
|
||||
|
||||
// WaitForPodRunningSlow waits for the pod to run in the namespace.
|
||||
// It has a longer timeout than WaitForPodRunning (util.slowPodStartTimeout).
|
||||
func (f *Framework) WaitForPodRunningSlow(podName string) error {
|
||||
return waitForPodRunningInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// WaitForPodNoLongerRunning waits for the pod to no longer be running in the namespace, for either
|
||||
// success or failure.
|
||||
func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
|
||||
return WaitForPodNoLongerRunningInNamespace(f.ClientSet, podName, f.Namespace.Name)
|
||||
}
|
||||
|
||||
// TestContainerOutput runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a substring matcher.
|
||||
func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring)
|
||||
}
|
||||
|
||||
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
|
||||
// for all of the containers in the podSpec to move into the 'Success' status, and tests
|
||||
// the specified container log against the given expected output using a regexp matcher.
|
||||
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
|
||||
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
|
||||
}
|
||||
|
||||
// Write a file using kubectl exec echo <contents> > <path> via specified container
|
||||
// Because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
|
||||
func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
|
||||
By("writing a file in the container")
|
||||
allowedCharacters := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
for _, c := range contents {
|
||||
if !strings.ContainsRune(allowedCharacters, c) {
|
||||
return fmt.Errorf("Unsupported character in string to write: %v", c)
|
||||
}
|
||||
}
|
||||
command := fmt.Sprintf("echo '%s' > '%s'", contents, path)
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "/bin/sh", "-c", command)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Read a file using kubectl exec cat <path>
|
||||
func (f *Framework) ReadFileViaContainer(podName, containerName string, path string) (string, error) {
|
||||
By("reading a file in the container")
|
||||
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "cat", path)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
|
||||
|
||||
func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path string) (string, error) {
|
||||
By("checking a file size in the container")
|
||||
|
||||
stdout, stderr, err := kubectlExecWithRetry(f.Namespace.Name, podName, containerName, "--", "ls", "-l", path)
|
||||
if err != nil {
|
||||
Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
|
||||
}
|
||||
return string(stdout), err
|
||||
}
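// exampleFileRoundTrip is an illustrative sketch added alongside the vendored
// helpers above (it is not part of the upstream file): it round-trips a value
// through WriteFileViaContainer and ReadFileViaContainer. The pod, container
// and path names are hypothetical.
func exampleFileRoundTrip(f *Framework) error {
	const pod, container, filePath = "test-pod", "test-container", "/tmp/e2evalue"
	// Only ASCII alphanumeric contents are accepted by WriteFileViaContainer.
	if err := f.WriteFileViaContainer(pod, container, filePath, "abc123"); err != nil {
		return err
	}
	got, err := f.ReadFileViaContainer(pod, container, filePath)
	if err != nil {
		return err
	}
	Logf("read back %q from %s", got, filePath)
	return nil
}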
|
||||
|
||||
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
|
||||
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (error, *v1.Service) {
|
||||
var err error = nil
|
||||
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
|
||||
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
|
||||
if block {
|
||||
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, f.Namespace.Name, labels.SelectorFromSet(labels.Set(theService.Spec.Selector)))
|
||||
}
|
||||
return err, theService
|
||||
}
|
||||
|
||||
// CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
|
||||
func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
|
||||
if appName == "" {
|
||||
panic(fmt.Sprintf("no app name provided"))
|
||||
}
|
||||
|
||||
serviceSelector := map[string]string{
|
||||
"app": appName + "-pod",
|
||||
}
|
||||
|
||||
// For convenience, user sending ports are optional.
|
||||
portsFunc := func() []v1.ServicePort {
|
||||
if contPort < 1 || svcPort < 1 {
|
||||
return nil
|
||||
} else {
|
||||
return []v1.ServicePort{{
|
||||
Protocol: "TCP",
|
||||
Port: int32(svcPort),
|
||||
TargetPort: intstr.FromInt(contPort),
|
||||
}}
|
||||
}
|
||||
}
|
||||
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
|
||||
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "service-for-" + appName,
|
||||
Labels: map[string]string{
|
||||
"app": appName + "-service",
|
||||
},
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Ports: portsFunc(),
|
||||
Selector: serviceSelector,
|
||||
},
|
||||
})
|
||||
ExpectNoError(err)
|
||||
return service
|
||||
}
|
||||
|
||||
// CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
|
||||
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
|
||||
nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
labels := map[string]string{
|
||||
"app": appName + "-pod",
|
||||
}
|
||||
for i, node := range nodes.Items {
|
||||
// one per node, but no more than maxCount.
|
||||
if i <= maxCount {
|
||||
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
|
||||
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf(appName+"-pod-%v", i),
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: podSpec(node),
|
||||
})
|
||||
ExpectNoError(err)
|
||||
}
|
||||
}
|
||||
return labels
|
||||
}
|
||||
|
||||
type KubeUser struct {
|
||||
Name string `yaml:"name"`
|
||||
User struct {
|
||||
Username string `yaml:"username"`
|
||||
Password string `yaml:"password"`
|
||||
Token string `yaml:"token"`
|
||||
} `yaml:"user"`
|
||||
}
|
||||
|
||||
type KubeCluster struct {
|
||||
Name string `yaml:"name"`
|
||||
Cluster struct {
|
||||
CertificateAuthorityData string `yaml:"certificate-authority-data"`
|
||||
Server string `yaml:"server"`
|
||||
} `yaml:"cluster"`
|
||||
}
|
||||
|
||||
type KubeConfig struct {
|
||||
Contexts []struct {
|
||||
Name string `yaml:"name"`
|
||||
Context struct {
|
||||
Cluster string `yaml:"cluster"`
|
||||
User string
|
||||
} `yaml:"context"`
|
||||
} `yaml:"contexts"`
|
||||
|
||||
Clusters []KubeCluster `yaml:"clusters"`
|
||||
|
||||
Users []KubeUser `yaml:"users"`
|
||||
}
|
||||
|
||||
func (kc *KubeConfig) FindUser(name string) *KubeUser {
|
||||
for _, user := range kc.Users {
|
||||
if user.Name == name {
|
||||
return &user
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (kc *KubeConfig) FindCluster(name string) *KubeCluster {
|
||||
for _, cluster := range kc.Clusters {
|
||||
if cluster.Name == name {
|
||||
return &cluster
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
|
||||
if numRetries > 0 {
|
||||
Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
|
||||
}
|
||||
|
||||
stdOutBytes, stdErrBytes, err := kubectlExec(namespace, podName, containerName, args...)
|
||||
if err != nil {
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
|
||||
// Retry on "i/o timeout" errors
|
||||
Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
continue
|
||||
}
|
||||
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
|
||||
// Retry on "container not found" errors
|
||||
Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
|
||||
time.Sleep(2 * time.Second)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return stdOutBytes, stdErrBytes, err
|
||||
}
|
||||
err := fmt.Errorf("Failed: kubectl exec failed %d times with \"i/o timeout\". Giving up.", maxKubectlExecRetries)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
func kubectlExec(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmdArgs := []string{
|
||||
"exec",
|
||||
fmt.Sprintf("--namespace=%v", namespace),
|
||||
podName,
|
||||
fmt.Sprintf("-c=%v", containerName),
|
||||
}
|
||||
cmdArgs = append(cmdArgs, args...)
|
||||
|
||||
cmd := KubectlCmd(cmdArgs...)
|
||||
cmd.Stdout, cmd.Stderr = &stdout, &stderr
|
||||
|
||||
Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
|
||||
err := cmd.Run()
|
||||
return stdout.Bytes(), stderr.Bytes(), err
|
||||
}
|
||||
|
||||
// Wrapper function for ginkgo describe. Adds namespacing.
|
||||
// TODO: Support type safe tagging as well https://github.com/kubernetes/kubernetes/pull/22401.
|
||||
func KubeDescribe(text string, body func()) bool {
|
||||
return Describe("[k8s.io] "+text, body)
|
||||
}
|
||||
|
||||
// Wrapper function for ginkgo It. Adds "[Conformance]" tag and makes static analysis easier.
|
||||
func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
|
||||
return It(text+" [Conformance]", body, timeout...)
|
||||
}
|
||||
|
||||
// PodStateVerification represents a verification of pod state.
|
||||
// Any time you have a set of pods that you want to operate against or query,
|
||||
// this struct can be used to declaratively identify those pods.
|
||||
type PodStateVerification struct {
|
||||
// Optional: only pods that have k=v labels will pass this filter.
|
||||
Selectors map[string]string
|
||||
|
||||
// Required: The phases which are valid for your pod.
|
||||
ValidPhases []v1.PodPhase
|
||||
|
||||
// Optional: only pods passing this function will pass the filter
|
||||
// Verify a pod.
|
||||
// As an optimization, in addition to specifying filter (boolean),
|
||||
// this function allows specifying an error as well.
|
||||
// The error indicates that the polling of the pod spectrum should stop.
|
||||
Verify func(v1.Pod) (bool, error)
|
||||
|
||||
// Optional: only pods with this name will pass the filter.
|
||||
PodName string
|
||||
}
|
||||
|
||||
type ClusterVerification struct {
|
||||
client clientset.Interface
|
||||
namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
|
||||
podState PodStateVerification
|
||||
}
|
||||
|
||||
func (f *Framework) NewClusterVerification(namespace *v1.Namespace, filter PodStateVerification) *ClusterVerification {
|
||||
return &ClusterVerification{
|
||||
f.ClientSet,
|
||||
namespace,
|
||||
filter,
|
||||
}
|
||||
}
|
||||
|
||||
func passesPodNameFilter(pod v1.Pod, name string) bool {
|
||||
return name == "" || strings.Contains(pod.Name, name)
|
||||
}
|
||||
|
||||
func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
|
||||
if verify == nil {
|
||||
return true, nil
|
||||
} else {
|
||||
verified, err := verify(pod)
|
||||
// If an error is returned, by definition, pod verification fails
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else {
|
||||
return verified, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
|
||||
passesPhaseFilter := false
|
||||
for _, phase := range validPhases {
|
||||
if pod.Status.Phase == phase {
|
||||
passesPhaseFilter = true
|
||||
}
|
||||
}
|
||||
return passesPhaseFilter
|
||||
}
|
||||
|
||||
// filterLabels returns a list of pods which have labels.
|
||||
func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
|
||||
var err error
|
||||
var selector labels.Selector
|
||||
var pl *v1.PodList
|
||||
// List pods based on selectors. This might be a tiny optimization rather than filtering
|
||||
// everything manually.
|
||||
if len(selectors) > 0 {
|
||||
selector = labels.SelectorFromSet(labels.Set(selectors))
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
pl, err = cli.CoreV1().Pods(ns).List(options)
|
||||
} else {
|
||||
pl, err = cli.CoreV1().Pods(ns).List(metav1.ListOptions{})
|
||||
}
|
||||
return pl, err
|
||||
}
|
||||
|
||||
// filter filters pods which pass a filter. It can be used to compose
|
||||
// the more useful abstractions like ForEach, WaitFor, and so on, which
|
||||
// can be used directly by tests.
|
||||
func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
|
||||
if len(p.ValidPhases) == 0 || namespace == nil {
|
||||
panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
|
||||
}
|
||||
|
||||
ns := namespace.Name
|
||||
pl, err := filterLabels(p.Selectors, c, ns) // Build an v1.PodList to operate against.
|
||||
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
|
||||
if len(pl.Items) == 0 || err != nil {
|
||||
return pl.Items, err
|
||||
}
|
||||
|
||||
unfilteredPods := pl.Items
|
||||
filteredPods := []v1.Pod{}
|
||||
ReturnPodsSoFar:
|
||||
// Next: Pod must match at least one of the states that the user specified
|
||||
for _, pod := range unfilteredPods {
|
||||
if !(passesPhasesFilter(pod, p.ValidPhases) && passesPodNameFilter(pod, p.PodName)) {
|
||||
continue
|
||||
}
|
||||
passesVerify, err := passesVerifyFilter(pod, p.Verify)
|
||||
if err != nil {
|
||||
Logf("Error detected on %v : %v !", pod.Name, err)
|
||||
break ReturnPodsSoFar
|
||||
}
|
||||
if passesVerify {
|
||||
filteredPods = append(filteredPods, pod)
|
||||
}
|
||||
}
|
||||
return filteredPods, err
|
||||
}
|
||||
|
||||
// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
|
||||
// definition.
|
||||
func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
|
||||
pods := []v1.Pod{}
|
||||
var returnedErr error
|
||||
|
||||
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
|
||||
pods, returnedErr = cl.podState.filter(cl.client, cl.namespace)
|
||||
|
||||
// Failure
|
||||
if returnedErr != nil {
|
||||
Logf("Cutting polling short: We got an error from the pod filtering layer.")
|
||||
// stop polling if the pod filtering returns an error. that should never happen.
|
||||
// it indicates, for example, that the client is broken or something non-pod related.
|
||||
return false, returnedErr
|
||||
}
|
||||
Logf("Found %v / %v", len(pods), atLeast)
|
||||
|
||||
// Success
|
||||
if len(pods) >= atLeast {
|
||||
return true, nil
|
||||
}
|
||||
// Keep trying...
|
||||
return false, nil
|
||||
})
|
||||
Logf("WaitFor completed with timeout %v. Pods found = %v out of %v", timeout, len(pods), atLeast)
|
||||
return pods, err
|
||||
}
|
||||
|
||||
// WaitForOrFail provides a shorthand WaitFor with failure as an option if anything goes wrong.
|
||||
func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration) {
|
||||
pods, err := cl.WaitFor(atLeast, timeout)
|
||||
if err != nil || len(pods) < atLeast {
|
||||
Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ForEach runs a function against every verifiable pod. Be warned that this doesn't wait for "n" pods to verify,
|
||||
// so it may return very quickly if you have strict pod state requirements.
|
||||
//
|
||||
// For example, if you require at least 5 pods to be running before your test will pass,
|
||||
// it's smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
|
||||
func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
|
||||
pods, err := cl.podState.filter(cl.client, cl.namespace)
|
||||
if err == nil {
|
||||
if len(pods) == 0 {
|
||||
Failf("No pods matched the filter.")
|
||||
}
|
||||
Logf("ForEach: Found %v pods from the filter. Now looping through them.", len(pods))
|
||||
for _, p := range pods {
|
||||
podFunc(p)
|
||||
}
|
||||
} else {
|
||||
Logf("ForEach: Something went wrong when filtering pods to execute against: %v", err)
|
||||
}
|
||||
|
||||
return err
|
||||
}
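// exampleClusterVerification is an illustrative sketch added alongside the vendored
// code above (not part of the upstream file): it shows how PodStateVerification,
// WaitForOrFail and ForEach compose. The selector and the pod count are hypothetical.
func exampleClusterVerification(f *Framework) error {
	cv := f.NewClusterVerification(f.Namespace, PodStateVerification{
		Selectors:   map[string]string{"app": "demo-pod"},
		ValidPhases: []v1.PodPhase{v1.PodRunning},
	})
	// Block until at least 3 matching pods pass the filter, then act on each one.
	cv.WaitForOrFail(3, 2*time.Minute)
	return cv.ForEach(func(p v1.Pod) {
		Logf("verified pod %s on node %s", p.Name, p.Spec.NodeName)
	})
}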
|
||||
|
||||
// GetLogToFileFunc is a convenience function that returns a function that have the same interface as
|
||||
// Logf, but writes to a specified file.
|
||||
func GetLogToFileFunc(file *os.File) func(format string, args ...interface{}) {
|
||||
return func(format string, args ...interface{}) {
|
||||
writer := bufio.NewWriter(file)
|
||||
if _, err := fmt.Fprintf(writer, format, args...); err != nil {
|
||||
Logf("Failed to write file %v with test performance data: %v", file.Name(), err)
|
||||
}
|
||||
writer.Flush()
|
||||
}
|
||||
}
|
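End to end, a spec built on this framework usually looks like the sketch below; the names are hypothetical and this is editorial illustration, not part of the vendored sources. NewDefaultFramework registers the BeforeEach/AfterEach shown above, so the test namespace is created before the body runs and torn down afterwards.

package example

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("demo suite", func() {
	// Registers BeforeEach/AfterEach: client setup, namespace creation, cleanup.
	f := framework.NewDefaultFramework("demo")

	ginkgo.It("should run in its own namespace", func() {
		framework.Logf("test namespace is %s", f.Namespace.Name)
	})
})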
84 vendor/k8s.io/kubernetes/test/e2e/framework/get-kubemark-resource-usage.go generated vendored Normal file
@@ -0,0 +1,84 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"bufio"
	"fmt"
	"strings"
)

type KubemarkResourceUsage struct {
	Name                    string
	MemoryWorkingSetInBytes uint64
	CPUUsageInCores         float64
}

func getMasterUsageByPrefix(prefix string) (string, error) {
	sshResult, err := SSH(fmt.Sprintf("ps ax -o %%cpu,rss,command | tail -n +2 | grep %v | sed 's/\\s+/ /g'", prefix), GetMasterHost()+":22", TestContext.Provider)
	if err != nil {
		return "", err
	}
	return sshResult.Stdout, nil
}

// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
	result := make(map[string]*KubemarkResourceUsage)
	// Get kubernetes component resource usage
	sshResult, err := getMasterUsageByPrefix("kube")
	if err != nil {
		Logf("Error when trying to SSH to master machine. Skipping probe. %v", err)
		return nil
	}
	scanner := bufio.NewScanner(strings.NewReader(sshResult))
	for scanner.Scan() {
		var cpu float64
		var mem uint64
		var name string
		fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
		if name != "" {
			// Gatherer expects pod_name/container_name format
			fullName := name + "/" + name
			result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
		}
	}
	// Get etcd resource usage
	sshResult, err = getMasterUsageByPrefix("bin/etcd")
	if err != nil {
		Logf("Error when trying to SSH to master machine. Skipping probe")
		return nil
	}
	scanner = bufio.NewScanner(strings.NewReader(sshResult))
	for scanner.Scan() {
		var cpu float64
		var mem uint64
		var etcdKind string
		fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /bin/sh -c /usr/local/bin/etcd", &cpu, &mem)
		dataDirStart := strings.Index(scanner.Text(), "--data-dir")
		if dataDirStart < 0 {
			continue
		}
		fmt.Sscanf(scanner.Text()[dataDirStart:], "--data-dir=/var/%s", &etcdKind)
		if etcdKind != "" {
			// Gatherer expects pod_name/container_name format
			fullName := "etcd/" + etcdKind
			result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
		}
	}
	return result
}
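// Worked sketch (not part of the vendored file): how a single line of the `ps` output above is
// turned into a KubemarkResourceUsage entry. The sample line is hypothetical.
func exampleParseKubeProcessLine() {
	line := " 2.5 123456 /usr/local/bin/kube-apiserver --flag=value" // hypothetical ps output
	var cpu float64
	var mem uint64
	var name string
	// Matches the Sscanf format used in GetKubemarkMasterComponentsResourceUsage:
	// %CPU, RSS in KiB, then the binary name after the "kube-" prefix (name == "apiserver").
	fmt.Sscanf(strings.TrimSpace(line), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
	usage := KubemarkResourceUsage{
		Name:                    name + "/" + name, // gatherer expects pod_name/container_name
		MemoryWorkingSetInBytes: mem * 1024,        // ps reports RSS in KiB
		CPUUsageInCores:         cpu / 100,         // ps reports %CPU
	}
	Logf("parsed %q -> %+v", line, usage)
}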
26
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/BUILD
generated
vendored
Normal file
@ -0,0 +1,26 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["wrapper.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper",
    deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
134
vendor/k8s.io/kubernetes/test/e2e/framework/ginkgowrapper/wrapper.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
// with structured data instead of a constant string.
package ginkgowrapper

import (
	"bufio"
	"bytes"
	"regexp"
	"runtime"
	"runtime/debug"
	"strings"

	"github.com/onsi/ginkgo"
)

// FailurePanic is the value that will be panicked from Fail.
type FailurePanic struct {
	Message        string // The failure message passed to Fail
	Filename       string // The filename that is the source of the failure
	Line           int    // The line number of the filename that is the source of the failure
	FullStackTrace string // A full stack trace starting at the source of the failure
}

// String makes FailurePanic look like the old Ginkgo panic when printed.
func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC }

// Fail wraps ginkgo.Fail so that it panics with more useful
// information about the failure. This function will panic with a
// FailurePanic.
func Fail(message string, callerSkip ...int) {
	skip := 1
	if len(callerSkip) > 0 {
		skip += callerSkip[0]
	}

	_, file, line, _ := runtime.Caller(skip)
	fp := FailurePanic{
		Message:        message,
		Filename:       file,
		Line:           line,
		FullStackTrace: pruneStack(skip),
	}

	defer func() {
		e := recover()
		if e != nil {
			panic(fp)
		}
	}()

	ginkgo.Fail(message, skip)
}

// SkipPanic is the value that will be panicked from Skip.
type SkipPanic struct {
	Message        string // The failure message passed to Fail
	Filename       string // The filename that is the source of the failure
	Line           int    // The line number of the filename that is the source of the failure
	FullStackTrace string // A full stack trace starting at the source of the failure
}

// String makes SkipPanic look like the old Ginkgo panic when printed.
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }

// Skip wraps ginkgo.Skip so that it panics with more useful
// information about why the test is being skipped. This function will
// panic with a SkipPanic.
func Skip(message string, callerSkip ...int) {
	skip := 1
	if len(callerSkip) > 0 {
		skip += callerSkip[0]
	}

	_, file, line, _ := runtime.Caller(skip)
	sp := SkipPanic{
		Message:        message,
		Filename:       file,
		Line:           line,
		FullStackTrace: pruneStack(skip),
	}

	defer func() {
		e := recover()
		if e != nil {
			panic(sp)
		}
	}()

	ginkgo.Skip(message, skip)
}

// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)

func pruneStack(skip int) string {
	skip += 2 // one for pruneStack and one for debug.Stack
	stack := debug.Stack()
	scanner := bufio.NewScanner(bytes.NewBuffer(stack))
	var prunedStack []string

	// skip the top of the stack
	for i := 0; i < 2*skip+1; i++ {
		scanner.Scan()
	}

	for scanner.Scan() {
		if stackSkipPattern.Match(scanner.Bytes()) {
			scanner.Scan() // these come in pairs
		} else {
			prunedStack = append(prunedStack, scanner.Text())
			scanner.Scan() // these come in pairs
			prunedStack = append(prunedStack, scanner.Text())
		}
	}

	return strings.Join(prunedStack, "\n")
}
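// Usage sketch (not part of the vendored file): because Fail re-panics with a FailurePanic
// value, a caller that needs to run cleanup on failure can recover it, inspect the structured
// fields, and re-panic so the test is still reported as failed. The cleanup callback and the
// failure message are hypothetical.
func exampleRecoverFailure(cleanup func()) {
	defer func() {
		if r := recover(); r != nil {
			if fp, ok := r.(FailurePanic); ok {
				// Structured failure data is available here.
				println("failure at", fp.Filename, fp.Line, ":", fp.Message)
			}
			cleanup()
			panic(r) // propagate so Ginkgo still marks the spec failed
		}
	}()
	Fail("example failure") // illustrative failure
}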
198
vendor/k8s.io/kubernetes/test/e2e/framework/google_compute.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
)
|
||||
|
||||
// TODO: These should really just use the GCE API client library or at least use
|
||||
// better formatted output from the --format flag.
|
||||
|
||||
func CreateGCEStaticIP(name string) (string, error) {
|
||||
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
|
||||
// NAME REGION ADDRESS STATUS
|
||||
// test-static-ip us-central1 104.197.143.7 RESERVED
|
||||
|
||||
var outputBytes []byte
|
||||
var err error
|
||||
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to convert zone to region: %v", err)
|
||||
}
|
||||
glog.Infof("Creating static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
|
||||
for attempts := 0; attempts < 4; attempts++ {
|
||||
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
|
||||
name, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--region", region, "-q", "--format=yaml").CombinedOutput()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
glog.Errorf("output from failed attempt to create static IP: %s", outputBytes)
|
||||
time.Sleep(time.Duration(5*attempts) * time.Second)
|
||||
}
|
||||
if err != nil {
|
||||
// Ditch the error, since the stderr in the output is what actually contains
|
||||
// any useful info.
|
||||
return "", fmt.Errorf("failed to create static IP: %s", outputBytes)
|
||||
}
|
||||
output := string(outputBytes)
|
||||
if strings.Contains(output, "RESERVED") {
|
||||
r, _ := regexp.Compile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
|
||||
staticIP := r.FindString(output)
|
||||
if staticIP == "" {
|
||||
return "", fmt.Errorf("static IP not found in gcloud command output: %v", output)
|
||||
} else {
|
||||
return staticIP, nil
|
||||
}
|
||||
} else {
|
||||
return "", fmt.Errorf("static IP %q could not be reserved: %v", name, output)
|
||||
}
|
||||
}
|
||||
|
||||
func DeleteGCEStaticIP(name string) error {
|
||||
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
|
||||
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
|
||||
// NAME REGION ADDRESS STATUS
|
||||
// test-static-ip us-central1 104.197.143.7 RESERVED
|
||||
|
||||
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to convert zone to region: %v", err)
|
||||
}
|
||||
glog.Infof("Deleting static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
|
||||
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
|
||||
name, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--region", region, "-q").CombinedOutput()
|
||||
if err != nil {
|
||||
// Ditch the error, since the stderr in the output is what actually contains
|
||||
// any useful info.
|
||||
return fmt.Errorf("failed to delete static IP %q: %v", name, string(outputBytes))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lookupClusterImageSources returns the master & node image strings, or an error.
|
||||
func lookupClusterImageSources() (string, string, error) {
|
||||
// Given args for a gcloud compute command, run it with other args, and return the values,
|
||||
// whether separated by newlines, commas or semicolons.
|
||||
gcloudf := func(argv ...string) ([]string, error) {
|
||||
args := []string{"compute"}
|
||||
args = append(args, argv...)
|
||||
args = append(args, "--project", TestContext.CloudConfig.ProjectID,
|
||||
"--zone", TestContext.CloudConfig.Zone)
|
||||
outputBytes, err := exec.Command("gcloud", args...).CombinedOutput()
|
||||
str := strings.Replace(string(outputBytes), ",", "\n", -1)
|
||||
str = strings.Replace(str, ";", "\n", -1)
|
||||
lines := strings.Split(str, "\n")
|
||||
if err != nil {
|
||||
Logf("lookupDiskImageSources: gcloud error with [%#v]; err:%v", argv, err)
|
||||
for _, l := range lines {
|
||||
Logf(" > %s", l)
|
||||
}
|
||||
}
|
||||
return lines, err
|
||||
}
|
||||
|
||||
// Given a GCE instance, look through its disks, finding one that has a sourceImage
|
||||
host2image := func(instance string) (string, error) {
|
||||
// gcloud compute instances describe {INSTANCE} --format="get(disks[].source)"
|
||||
// gcloud compute disks describe {DISKURL} --format="get(sourceImage)"
|
||||
disks, err := gcloudf("instances", "describe", instance, "--format=get(disks[].source)")
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if len(disks) == 0 {
|
||||
return "", fmt.Errorf("instance %q had no findable disks", instance)
|
||||
}
|
||||
// Loop over disks, looking for the boot disk
|
||||
for _, disk := range disks {
|
||||
lines, err := gcloudf("disks", "describe", disk, "--format=get(sourceImage)")
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if len(lines) > 0 && lines[0] != "" {
|
||||
return lines[0], nil // break, we're done
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("instance %q had no disk with a sourceImage", instance)
|
||||
}
|
||||
|
||||
// gcloud compute instance-groups list-instances {GROUPNAME} --format="get(instance)"
|
||||
nodeName := ""
|
||||
instGroupName := strings.Split(TestContext.CloudConfig.NodeInstanceGroup, ",")[0]
|
||||
if lines, err := gcloudf("instance-groups", "list-instances", instGroupName, "--format=get(instance)"); err != nil {
|
||||
return "", "", err
|
||||
} else if len(lines) == 0 {
|
||||
return "", "", fmt.Errorf("no instances inside instance-group %q", instGroupName)
|
||||
} else {
|
||||
nodeName = lines[0]
|
||||
}
|
||||
|
||||
nodeImg, err := host2image(nodeName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
frags := strings.Split(nodeImg, "/")
|
||||
nodeImg = frags[len(frags)-1]
|
||||
|
||||
// For GKE clusters, MasterName will not be defined; we just leave masterImg blank.
|
||||
masterImg := ""
|
||||
if masterName := TestContext.CloudConfig.MasterName; masterName != "" {
|
||||
img, err := host2image(masterName)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
frags = strings.Split(img, "/")
|
||||
masterImg = frags[len(frags)-1]
|
||||
}
|
||||
|
||||
return masterImg, nodeImg, nil
|
||||
}
|
||||
|
||||
func LogClusterImageSources() {
|
||||
masterImg, nodeImg, err := lookupClusterImageSources()
|
||||
if err != nil {
|
||||
Logf("Cluster image sources lookup failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
Logf("cluster-master-image: %s", masterImg)
|
||||
Logf("cluster-node-image: %s", nodeImg)
|
||||
|
||||
images := map[string]string{
|
||||
"master_os_image": masterImg,
|
||||
"node_os_image": nodeImg,
|
||||
}
|
||||
|
||||
outputBytes, _ := json.MarshalIndent(images, "", " ")
|
||||
filePath := filepath.Join(TestContext.ReportDir, "images.json")
|
||||
if err := ioutil.WriteFile(filePath, outputBytes, 0644); err != nil {
|
||||
Logf("cluster images sources, could not write to %q: %v", filePath, err)
|
||||
}
|
||||
}
|
76
vendor/k8s.io/kubernetes/test/e2e/framework/gpu_util.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"

	. "github.com/onsi/gomega"
)

const (
	// NVIDIAGPUResourceName is the extended name of the GPU resource since v1.8,
	// which uses the device plugin mechanism.
	NVIDIAGPUResourceName = "nvidia.com/gpu"

	// TODO: Parametrize it by making it a feature in TestFramework,
	// so we can override the daemonset in other setups (non COS).
	// GPUDevicePluginDSYAML is the official Google Device Plugin Daemonset NVIDIA GPU manifest for GKE.
	GPUDevicePluginDSYAML = "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/device-plugins/nvidia-gpu/daemonset.yaml"
)

// TODO: make this generic and not linked to COS only.
// NumberOfNVIDIAGPUs returns the number of GPUs advertised by a node.
// This is based on the Device Plugin system and expected to run on a COS-based node
// after the NVIDIA drivers were installed.
func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
	val, ok := node.Status.Capacity[NVIDIAGPUResourceName]

	if !ok {
		return 0
	}

	return val.Value()
}

// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE.
func NVIDIADevicePlugin(ns string) *v1.Pod {
	ds, err := DsFromManifest(GPUDevicePluginDSYAML)
	Expect(err).NotTo(HaveOccurred())
	p := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
			Namespace: ns,
		},

		Spec: ds.Spec.Template.Spec,
	}
	// Remove node affinity
	p.Spec.Affinity = nil

	return p
}

func GetGPUDevicePluginImage() string {
	ds, err := DsFromManifest(GPUDevicePluginDSYAML)
	if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
		return ""
	}
	return ds.Spec.Template.Spec.Containers[0].Image
}
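// Usage sketch (not part of the vendored file): summing advertised GPUs across a node list.
// Assumes a *v1.NodeList obtained elsewhere in the test (e.g. via the framework clientset).
func exampleClusterGPUCount(nodes *v1.NodeList) int64 {
	var total int64
	for i := range nodes.Items {
		// NumberOfNVIDIAGPUs returns 0 for nodes that do not advertise nvidia.com/gpu.
		total += NumberOfNVIDIAGPUs(&nodes.Items[i])
	}
	return total
}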
1196
vendor/k8s.io/kubernetes/test/e2e/framework/ingress_utils.go
generated
vendored
Normal file
File diff suppressed because it is too large
259
vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
generated
vendored
Normal file
@ -0,0 +1,259 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
batch "k8s.io/api/batch/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
// How long to wait for a job to finish.
|
||||
JobTimeout = 15 * time.Minute
|
||||
|
||||
// Job selector name
|
||||
JobSelectorKey = "job"
|
||||
)
|
||||
|
||||
// NewTestJob returns a Job which does one of several testing behaviors. notTerminate starts a Job that will run
|
||||
// effectively forever. fail starts a Job that will fail immediately. succeed starts a Job that will succeed
|
||||
// immediately. randomlySucceedOrFail starts a Job that will succeed or fail randomly. failOnce fails the Job the
|
||||
// first time it is run and succeeds subsequently. name is the Name of the Job. RestartPolicy indicates the restart
|
||||
// policy of the containers in which the Pod is running. Parallelism is the Job's parallelism, and completions is the
|
||||
// Job's required number of completions.
|
||||
func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32, activeDeadlineSeconds *int64, backoffLimit int32) *batch.Job {
|
||||
job := &batch.Job{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Job",
|
||||
},
|
||||
Spec: batch.JobSpec{
|
||||
ActiveDeadlineSeconds: activeDeadlineSeconds,
|
||||
Parallelism: ¶llelism,
|
||||
Completions: &completions,
|
||||
BackoffLimit: &backoffLimit,
|
||||
ManualSelector: newBool(false),
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{JobSelectorKey: name},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
RestartPolicy: rPol,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "data",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "c",
|
||||
Image: BusyBoxImage,
|
||||
Command: []string{},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
MountPath: "/data",
|
||||
Name: "data",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
switch behavior {
|
||||
case "notTerminate":
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
|
||||
case "fail":
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
|
||||
case "succeed":
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
|
||||
case "randomlySucceedOrFail":
|
||||
// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
|
||||
// Dividing by 16384 gives roughly 50/50 chance of success.
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
|
||||
case "failOnce":
|
||||
// Fail the first the container of the pod is run, and
|
||||
// succeed the second time. Checks for file on emptydir.
|
||||
// If present, succeed. If not, create but fail.
|
||||
// Note that this cannot be used with RestartNever because
|
||||
// it always fails the first time for a pod.
|
||||
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
|
||||
}
|
||||
return job
|
||||
}
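// Usage sketch (not part of the vendored file): creating a short-lived "succeed" Job with
// NewTestJob and waiting for it to finish. Assumes a clientset `c` and namespace `ns`
// supplied by the calling test.
func exampleRunSucceedingJob(c clientset.Interface, ns string) error {
	parallelism, completions := int32(1), int32(1)
	backoffLimit := int32(6)
	job := NewTestJob("succeed", "example-job", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
	if _, err := CreateJob(c, ns, job); err != nil {
		return err
	}
	// Blocks until Status.Succeeded reaches the requested completions (or JobTimeout elapses).
	return WaitForJobFinish(c, ns, job.Name, completions)
}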
|
||||
|
||||
// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
|
||||
func GetJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
|
||||
// been created.
|
||||
func CreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Create(job)
|
||||
}
|
||||
|
||||
// UpdateJob uses c to updated job in namespace ns. If the returned error is nil, the returned Job is valid and has
|
||||
// been updated.
|
||||
func UpdateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
|
||||
return c.BatchV1().Jobs(ns).Update(job)
|
||||
}
|
||||
|
||||
// UpdateJobFunc updates the job object. It retries on conflict and fails the calling test
// on any other error. name is the job name, updateFn is the function that mutates the
// job object.
|
||||
func UpdateJobFunc(c clientset.Interface, ns, name string, updateFn func(job *batch.Job)) {
|
||||
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
|
||||
job, err := GetJob(c, ns, name)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
|
||||
}
|
||||
updateFn(job)
|
||||
_, err = UpdateJob(c, ns, job)
|
||||
if err == nil {
|
||||
Logf("Successfully updated job %q", name)
|
||||
return true, nil
|
||||
}
|
||||
if errors.IsConflict(err) {
|
||||
Logf("Conflicting update to job %q, re-get and re-update: %v", name, err)
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("failed to update job %q: %v", name, err)
|
||||
}))
|
||||
}
|
||||
|
||||
// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
|
||||
// deleted.
|
||||
func DeleteJob(c clientset.Interface, ns, name string) error {
|
||||
return c.BatchV1().Jobs(ns).Delete(name, nil)
|
||||
}
|
||||
|
||||
// GetJobPods returns a list of Pods belonging to a Job.
|
||||
func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
return c.CoreV1().Pods(ns).List(options)
|
||||
}
|
||||
|
||||
// WaitForAllJobPodsRunning waits for all pods of the Job named jobName in namespace ns to become Running. Only use
// when pods will run for a long time, or it will be racy.
|
||||
func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
|
||||
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
|
||||
pods, err := GetJobPods(c, ns, jobName)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
count := int32(0)
|
||||
for _, p := range pods.Items {
|
||||
if p.Status.Phase == v1.PodRunning {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count == parallelism, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForJobFinish uses c to wait for completions to complete for the Job jobName in namespace ns.
|
||||
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
|
||||
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return curr.Status.Succeeded == completions, nil
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
|
||||
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
|
||||
return wait.Poll(Poll, timeout, func() (bool, error) {
|
||||
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, c := range curr.Status.Conditions {
|
||||
if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
|
||||
if reason == "" || reason == c.Reason {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
// CheckForAllJobPodsRunning uses c to check whether all pods of the Job named jobName in ns are running, i.e. the
// number of running pods equals the Job's parallelism. The returned bool is only meaningful when the returned error is nil.
|
||||
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
|
||||
options := metav1.ListOptions{LabelSelector: label.String()}
|
||||
pods, err := c.CoreV1().Pods(ns).List(options)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
count := int32(0)
|
||||
for _, p := range pods.Items {
|
||||
if p.Status.Phase == v1.PodRunning {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count == parallelism, nil
|
||||
}
|
||||
|
||||
func newBool(val bool) *bool {
|
||||
p := new(bool)
|
||||
*p = val
|
||||
return p
|
||||
}
|
||||
|
||||
type updateJobFunc func(*batch.Job)
|
||||
|
||||
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
|
||||
jobs := c.BatchV1().Jobs(namespace)
|
||||
var updateErr error
|
||||
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
|
||||
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// Apply the update, then attempt to push it to the apiserver.
|
||||
applyUpdate(job)
|
||||
if job, err = jobs.Update(job); err == nil {
|
||||
Logf("Updating job %s", name)
|
||||
return true, nil
|
||||
}
|
||||
updateErr = err
|
||||
return false, nil
|
||||
})
|
||||
if pollErr == wait.ErrWaitTimeout {
|
||||
pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr)
|
||||
}
|
||||
return job, pollErr
|
||||
}
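// Usage sketch (not part of the vendored file): bumping a Job's parallelism with
// UpdateJobWithRetries, which re-reads the Job and re-applies the mutation on conflicts.
// Assumes a clientset `c` and namespace `ns` supplied by the calling test.
func exampleScaleJob(c clientset.Interface, ns, name string, parallelism int32) (*batch.Job, error) {
	return UpdateJobWithRetries(c, ns, name, func(job *batch.Job) {
		job.Spec.Parallelism = &parallelism
	})
}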
|
852
vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go
generated
vendored
Normal file
@ -0,0 +1,852 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
|
||||
dockermetrics "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
|
||||
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/test/e2e/framework/metrics"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// KubeletLatencyMetric stores latency metrics scraped from the kubelet server's /metrics endpoint.
// TODO: Get some more structure around the metrics and this type
|
||||
type KubeletLatencyMetric struct {
|
||||
// eg: list, info, create
|
||||
Operation string
|
||||
// eg: sync_pods, pod_worker
|
||||
Method string
|
||||
// 0 <= quantile <=1, e.g. 0.95 is 95%tile, 0.5 is median.
|
||||
Quantile float64
|
||||
Latency time.Duration
|
||||
}
|
||||
|
||||
// KubeletLatencyMetrics implements sort.Interface for []KubeletLatencyMetric based on
// the latency field.
|
||||
type KubeletLatencyMetrics []KubeletLatencyMetric
|
||||
|
||||
func (a KubeletLatencyMetrics) Len() int { return len(a) }
|
||||
func (a KubeletLatencyMetrics) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].Latency }
|
||||
|
||||
// If an apiserver client is passed in, the function will try to get kubelet metrics from the metrics grabber;
// otherwise, the function will try to get kubelet metrics directly from the node.
|
||||
func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
|
||||
if c == nil {
|
||||
return metrics.GrabKubeletMetricsWithoutProxy(nodeName)
|
||||
}
|
||||
grabber, err := metrics.NewMetricsGrabber(c, nil, true, false, false, false, false)
|
||||
if err != nil {
|
||||
return metrics.KubeletMetrics{}, err
|
||||
}
|
||||
return grabber.GrabFromKubelet(nodeName)
|
||||
}
|
||||
|
||||
// getKubeletMetrics gets all metrics in kubelet subsystem from specified node and trims
|
||||
// the subsystem prefix.
|
||||
func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
|
||||
ms, err := getKubeletMetricsFromNode(c, nodeName)
|
||||
if err != nil {
|
||||
return metrics.KubeletMetrics{}, err
|
||||
}
|
||||
|
||||
kubeletMetrics := make(metrics.KubeletMetrics)
|
||||
for name, samples := range ms {
|
||||
const prefix = kubeletmetrics.KubeletSubsystem + "_"
|
||||
if !strings.HasPrefix(name, prefix) {
|
||||
// Not a kubelet metric.
|
||||
continue
|
||||
}
|
||||
method := strings.TrimPrefix(name, prefix)
|
||||
kubeletMetrics[method] = samples
|
||||
}
|
||||
return kubeletMetrics, nil
|
||||
}
|
||||
|
||||
// GetKubeletLatencyMetrics gets all latency related kubelet metrics. Note that the KubeletMetrics
// passed in should not contain the subsystem prefix.
|
||||
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
|
||||
latencyMethods := sets.NewString(
|
||||
kubeletmetrics.PodWorkerLatencyKey,
|
||||
kubeletmetrics.PodWorkerStartLatencyKey,
|
||||
kubeletmetrics.PodStartLatencyKey,
|
||||
kubeletmetrics.CgroupManagerOperationsKey,
|
||||
dockermetrics.DockerOperationsLatencyKey,
|
||||
kubeletmetrics.PodWorkerStartLatencyKey,
|
||||
kubeletmetrics.PLEGRelistLatencyKey,
|
||||
)
|
||||
return GetKubeletMetrics(ms, latencyMethods)
|
||||
}
|
||||
|
||||
func GetKubeletMetrics(ms metrics.KubeletMetrics, methods sets.String) KubeletLatencyMetrics {
|
||||
var latencyMetrics KubeletLatencyMetrics
|
||||
for method, samples := range ms {
|
||||
if !methods.Has(method) {
|
||||
continue
|
||||
}
|
||||
for _, sample := range samples {
|
||||
latency := sample.Value
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
var quantile float64
|
||||
if val, ok := sample.Metric[model.QuantileLabel]; ok {
|
||||
var err error
|
||||
if quantile, err = strconv.ParseFloat(string(val), 64); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{
|
||||
Operation: operation,
|
||||
Method: method,
|
||||
Quantile: quantile,
|
||||
Latency: time.Duration(int64(latency)) * time.Microsecond,
|
||||
})
|
||||
}
|
||||
}
|
||||
return latencyMetrics
|
||||
}
|
||||
|
||||
// RuntimeOperationMonitor is the tool getting and parsing docker operation metrics.
|
||||
type RuntimeOperationMonitor struct {
|
||||
client clientset.Interface
|
||||
nodesRuntimeOps map[string]NodeRuntimeOperationErrorRate
|
||||
}
|
||||
|
||||
// NodeRuntimeOperationErrorRate is the runtime operation error rate on one node.
|
||||
type NodeRuntimeOperationErrorRate map[string]*RuntimeOperationErrorRate
|
||||
|
||||
// RuntimeOperationErrorRate is the error rate of a specified runtime operation.
|
||||
type RuntimeOperationErrorRate struct {
|
||||
TotalNumber float64
|
||||
ErrorRate float64
|
||||
TimeoutRate float64
|
||||
}
|
||||
|
||||
func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
|
||||
m := &RuntimeOperationMonitor{
|
||||
client: c,
|
||||
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
|
||||
}
|
||||
nodes, err := m.client.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
|
||||
}
|
||||
for _, node := range nodes.Items {
|
||||
m.nodesRuntimeOps[node.Name] = make(NodeRuntimeOperationErrorRate)
|
||||
}
|
||||
// Initialize the runtime operation error rate
|
||||
m.GetRuntimeOperationErrorRate()
|
||||
return m
|
||||
}
|
||||
|
||||
// GetRuntimeOperationErrorRate gets runtime operation records from kubelet metrics and calculates
// error rates of all runtime operations.
|
||||
func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate {
|
||||
for node := range m.nodesRuntimeOps {
|
||||
nodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
|
||||
if err != nil {
|
||||
Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
|
||||
continue
|
||||
}
|
||||
m.nodesRuntimeOps[node] = nodeResult
|
||||
}
|
||||
return m.nodesRuntimeOps
|
||||
}
|
||||
|
||||
// GetLatestRuntimeOperationErrorRate gets latest error rate and timeout rate from last observed RuntimeOperationErrorRate.
|
||||
func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate {
|
||||
result := make(map[string]NodeRuntimeOperationErrorRate)
|
||||
for node := range m.nodesRuntimeOps {
|
||||
result[node] = make(NodeRuntimeOperationErrorRate)
|
||||
oldNodeResult := m.nodesRuntimeOps[node]
|
||||
curNodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
|
||||
if err != nil {
|
||||
Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
|
||||
continue
|
||||
}
|
||||
for op, cur := range curNodeResult {
|
||||
t := *cur
|
||||
if old, found := oldNodeResult[op]; found {
|
||||
t.ErrorRate = (t.ErrorRate*t.TotalNumber - old.ErrorRate*old.TotalNumber) / (t.TotalNumber - old.TotalNumber)
|
||||
t.TimeoutRate = (t.TimeoutRate*t.TotalNumber - old.TimeoutRate*old.TotalNumber) / (t.TotalNumber - old.TotalNumber)
|
||||
t.TotalNumber -= old.TotalNumber
|
||||
}
|
||||
result[node][op] = &t
|
||||
}
|
||||
m.nodesRuntimeOps[node] = curNodeResult
|
||||
}
|
||||
return result
|
||||
}
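// Worked sketch (not part of the vendored file): the windowed rate computed above, with
// hypothetical numbers. Previously 100 operations at a 10% cumulative error rate (10 errors);
// now 150 operations at an 8% cumulative error rate (12 errors). The window saw 50 operations
// and 2 new errors, so the latest error rate is 2/50 = 4%.
func exampleWindowedErrorRate() {
	old := RuntimeOperationErrorRate{TotalNumber: 100, ErrorRate: 0.10}
	cur := RuntimeOperationErrorRate{TotalNumber: 150, ErrorRate: 0.08}
	windowTotal := cur.TotalNumber - old.TotalNumber                              // 50 new operations
	windowErrors := cur.ErrorRate*cur.TotalNumber - old.ErrorRate*old.TotalNumber // 12 - 10 = 2 new errors
	Logf("latest error rate: %f", windowErrors/windowTotal)                       // 0.04
}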
|
||||
|
||||
// FormatRuntimeOperationErrorRate formats the runtime operation error rate to string.
|
||||
func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperationErrorRate) string {
|
||||
lines := []string{}
|
||||
for node, nodeResult := range nodesResult {
|
||||
lines = append(lines, fmt.Sprintf("node %q runtime operation error rate:", node))
|
||||
for op, result := range nodeResult {
|
||||
line := fmt.Sprintf("operation %q: total - %.0f; error rate - %f; timeout rate - %f", op,
|
||||
result.TotalNumber, result.ErrorRate, result.TimeoutRate)
|
||||
lines = append(lines, line)
|
||||
}
|
||||
lines = append(lines, fmt.Sprintln())
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
||||
|
||||
// getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node.
|
||||
func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) {
|
||||
result := make(NodeRuntimeOperationErrorRate)
|
||||
ms, err := getKubeletMetrics(c, node)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
// If no corresponding metrics are found, the returned samples will be empty. Then the following
|
||||
// loop will be skipped automatically.
|
||||
allOps := ms[dockermetrics.DockerOperationsKey]
|
||||
errOps := ms[dockermetrics.DockerOperationsErrorsKey]
|
||||
timeoutOps := ms[dockermetrics.DockerOperationsTimeoutKey]
|
||||
for _, sample := range allOps {
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
result[operation] = &RuntimeOperationErrorRate{TotalNumber: float64(sample.Value)}
|
||||
}
|
||||
for _, sample := range errOps {
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
// Should always find the corresponding item, just in case
|
||||
if _, found := result[operation]; found {
|
||||
result[operation].ErrorRate = float64(sample.Value) / result[operation].TotalNumber
|
||||
}
|
||||
}
|
||||
for _, sample := range timeoutOps {
|
||||
operation := string(sample.Metric["operation_type"])
|
||||
if _, found := result[operation]; found {
|
||||
result[operation].TimeoutRate = float64(sample.Value) / result[operation].TotalNumber
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
|
||||
func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
|
||||
ms, err := getKubeletMetrics(c, nodeName)
|
||||
if err != nil {
|
||||
return KubeletLatencyMetrics{}, err
|
||||
}
|
||||
latencyMetrics := GetKubeletLatencyMetrics(ms)
|
||||
sort.Sort(latencyMetrics)
|
||||
var badMetrics KubeletLatencyMetrics
|
||||
logFunc("\nLatency metrics for node %v", nodeName)
|
||||
for _, m := range latencyMetrics {
|
||||
if m.Latency > threshold {
|
||||
badMetrics = append(badMetrics, m)
|
||||
Logf("%+v", m)
|
||||
}
|
||||
}
|
||||
return badMetrics, nil
|
||||
}
|
||||
|
||||
// getStatsSummary contacts kubelet for the container information.
|
||||
func getStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
|
||||
defer cancel()
|
||||
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Context(ctx).
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
Do().Raw()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
summary := stats.Summary{}
|
||||
err = json.Unmarshal(data, &summary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &summary, nil
|
||||
}
|
||||
|
||||
func removeUint64Ptr(ptr *uint64) uint64 {
|
||||
if ptr == nil {
|
||||
return 0
|
||||
}
|
||||
return *ptr
|
||||
}
|
||||
|
||||
// getOneTimeResourceUsageOnNode queries the node's /stats/summary endpoint
|
||||
// and returns the resource usage of all containerNames for the past
|
||||
// cpuInterval.
|
||||
// The acceptable range of the interval is 2s~120s. Be warned that as the
|
||||
// interval (and #containers) increases, the size of kubelet's response
|
||||
// could be significant. E.g., the 60s interval stats for ~20 containers is
|
||||
// ~1.5MB. Don't hammer the node with frequent, heavy requests.
|
||||
//
|
||||
// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
|
||||
// stats points to compute the cpu usage over the interval. Assuming cadvisor
|
||||
// polls every second, we'd need to get N stats points for N-second interval.
|
||||
// Note that this is an approximation and may not be accurate, hence we also
|
||||
// write the actual interval used for calculation (based on the timestamps of
// the stats points) in ContainerResourceUsage.CPUInterval.
//
// containerNames is a function returning a collection of container names in which
// the user is interested.
|
||||
func getOneTimeResourceUsageOnNode(
|
||||
c clientset.Interface,
|
||||
nodeName string,
|
||||
cpuInterval time.Duration,
|
||||
containerNames func() []string,
|
||||
) (ResourceUsagePerContainer, error) {
|
||||
const (
|
||||
// cadvisor records stats about every second.
|
||||
cadvisorStatsPollingIntervalInSeconds float64 = 1.0
|
||||
// cadvisor caches up to 2 minutes of stats (configured by kubelet).
|
||||
maxNumStatsToRequest int = 120
|
||||
)
|
||||
|
||||
numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
|
||||
if numStats < 2 || numStats > maxNumStatsToRequest {
|
||||
return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest)
|
||||
}
|
||||
// Get information of all containers on the node.
|
||||
summary, err := getStatsSummary(c, nodeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := func(name string, newStats *stats.ContainerStats) *ContainerResourceUsage {
|
||||
if newStats == nil || newStats.CPU == nil || newStats.Memory == nil {
|
||||
return nil
|
||||
}
|
||||
return &ContainerResourceUsage{
|
||||
Name: name,
|
||||
Timestamp: newStats.StartTime.Time,
|
||||
CPUUsageInCores: float64(removeUint64Ptr(newStats.CPU.UsageNanoCores)) / 1000000000,
|
||||
MemoryUsageInBytes: removeUint64Ptr(newStats.Memory.UsageBytes),
|
||||
MemoryWorkingSetInBytes: removeUint64Ptr(newStats.Memory.WorkingSetBytes),
|
||||
MemoryRSSInBytes: removeUint64Ptr(newStats.Memory.RSSBytes),
|
||||
CPUInterval: 0,
|
||||
}
|
||||
}
|
||||
// Process container infos that are relevant to us.
|
||||
containers := containerNames()
|
||||
usageMap := make(ResourceUsagePerContainer, len(containers))
|
||||
observedContainers := []string{}
|
||||
for _, pod := range summary.Pods {
|
||||
for _, container := range pod.Containers {
|
||||
isInteresting := false
|
||||
for _, interestingContainerName := range containers {
|
||||
if container.Name == interestingContainerName {
|
||||
isInteresting = true
|
||||
observedContainers = append(observedContainers, container.Name)
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isInteresting {
|
||||
continue
|
||||
}
|
||||
if usage := f(pod.PodRef.Name+"/"+container.Name, &container); usage != nil {
|
||||
usageMap[pod.PodRef.Name+"/"+container.Name] = usage
|
||||
}
|
||||
}
|
||||
}
|
||||
return usageMap, nil
|
||||
}
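// Usage sketch (not part of the vendored file): grabbing a one-off resource-usage snapshot for
// the standard target containers on a node and logging it. Assumes a clientset `c` and a node
// name supplied by the calling test; the 30s interval is inside the documented 2s-120s window.
func exampleOneTimeUsage(c clientset.Interface, nodeName string) {
	usage, err := getOneTimeResourceUsageOnNode(c, nodeName, 30*time.Second, TargetContainers)
	if err != nil {
		Logf("could not get resource usage on %q: %v", nodeName, err)
		return
	}
	Logf("%s", formatResourceUsageStats(nodeName, usage))
}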
|
||||
|
||||
func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
|
||||
data, err := c.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
|
||||
Suffix("stats/summary").
|
||||
SetHeader("Content-Type", "application/json").
|
||||
Do().Raw()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var summary *stats.Summary
|
||||
err = json.Unmarshal(data, &summary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
func getSystemContainerStats(summary *stats.Summary) map[string]*stats.ContainerStats {
|
||||
statsList := summary.Node.SystemContainers
|
||||
statsMap := make(map[string]*stats.ContainerStats)
|
||||
for i := range statsList {
|
||||
statsMap[statsList[i].Name] = &statsList[i]
|
||||
}
|
||||
|
||||
// Create a root container stats using information available in
|
||||
// stats.NodeStats. This is necessary since it is a different type.
|
||||
statsMap[rootContainerName] = &stats.ContainerStats{
|
||||
CPU: summary.Node.CPU,
|
||||
Memory: summary.Node.Memory,
|
||||
}
|
||||
return statsMap
|
||||
}
|
||||
|
||||
const (
|
||||
rootContainerName = "/"
|
||||
)
|
||||
|
||||
// A list of containers for which we want to collect resource usage.
|
||||
func TargetContainers() []string {
|
||||
return []string{
|
||||
rootContainerName,
|
||||
stats.SystemContainerRuntime,
|
||||
stats.SystemContainerKubelet,
|
||||
}
|
||||
}
|
||||
|
||||
type ContainerResourceUsage struct {
|
||||
Name string
|
||||
Timestamp time.Time
|
||||
CPUUsageInCores float64
|
||||
MemoryUsageInBytes uint64
|
||||
MemoryWorkingSetInBytes uint64
|
||||
MemoryRSSInBytes uint64
|
||||
// The interval used to calculate CPUUsageInCores.
|
||||
CPUInterval time.Duration
|
||||
}
|
||||
|
||||
func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsage) bool {
|
||||
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
|
||||
}
|
||||
|
||||
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
|
||||
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
|
||||
|
||||
func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
|
||||
// Example output:
|
||||
//
|
||||
// Resource usage for node "e2e-test-foo-node-abcde":
|
||||
// container cpu(cores) memory(MB)
|
||||
// "/" 0.363 2942.09
|
||||
// "/docker-daemon" 0.088 521.80
|
||||
// "/kubelet" 0.086 424.37
|
||||
// "/system" 0.007 119.88
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
|
||||
for name, s := range containerStats {
|
||||
fmt.Fprintf(w, "%q\t%.3f\t%.2f\t%.2f\n", name, s.CPUUsageInCores, float64(s.MemoryWorkingSetInBytes)/(1024*1024), float64(s.MemoryRSSInBytes)/(1024*1024))
|
||||
}
|
||||
w.Flush()
|
||||
return fmt.Sprintf("Resource usage on node %q:\n%s", nodeName, buf.String())
|
||||
}
|
||||
|
||||
type uint64arr []uint64
|
||||
|
||||
func (a uint64arr) Len() int { return len(a) }
|
||||
func (a uint64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a uint64arr) Less(i, j int) bool { return a[i] < a[j] }
|
||||
|
||||
type usageDataPerContainer struct {
|
||||
cpuData []float64
|
||||
memUseData []uint64
|
||||
memWorkSetData []uint64
|
||||
}
|
||||
|
||||
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
|
||||
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
raw, errRaw := client.Raw()
|
||||
if errRaw != nil {
|
||||
return "", err
|
||||
}
|
||||
stats := string(raw)
|
||||
// Only dumping the runtime.MemStats numbers to avoid polluting the log.
|
||||
numLines := 23
|
||||
lines := strings.Split(stats, "\n")
|
||||
return strings.Join(lines[len(lines)-numLines:], "\n"), nil
|
||||
}
|
||||
|
||||
func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
|
||||
podList, err := GetKubeletPods(c, nodeName)
|
||||
if err != nil {
|
||||
Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
|
||||
return
|
||||
}
|
||||
for _, p := range podList.Items {
|
||||
Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
|
||||
for _, c := range p.Status.ContainerStatuses {
|
||||
Logf("\tContainer %v ready: %v, restart count %v",
|
||||
c.Name, c.Ready, c.RestartCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *ContainerResourceUsage {
|
||||
return &ContainerResourceUsage{
|
||||
Name: name,
|
||||
Timestamp: newStats.CPU.Time.Time,
|
||||
CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()),
|
||||
MemoryUsageInBytes: *newStats.Memory.UsageBytes,
|
||||
MemoryWorkingSetInBytes: *newStats.Memory.WorkingSetBytes,
|
||||
MemoryRSSInBytes: *newStats.Memory.RSSBytes,
|
||||
CPUInterval: newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time),
|
||||
}
|
||||
}
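// Worked sketch (not part of the vendored file): the CPU figure above is a delta of the
// cumulative UsageCoreNanoSeconds counter divided by the elapsed wall-clock nanoseconds.
// With hypothetical samples 10s apart where the counter grew by 5e9 ns of CPU time, the
// usage is 5e9 / 10e9 = 0.5 cores.
func exampleCPUUsageFromCounters() {
	var oldCounter, newCounter uint64 = 20000000000, 25000000000 // cumulative ns of CPU time
	interval := 10 * time.Second
	usageInCores := float64(newCounter-oldCounter) / float64(interval.Nanoseconds())
	Logf("cpu usage: %.2f cores", usageInCores) // 0.50
}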
|
||||
|
||||
// resourceCollector periodically polls the node, collect stats for a given
|
||||
// list of containers, computes and cache resource usage up to
|
||||
// maxEntriesPerContainer for each container.
|
||||
type resourceCollector struct {
|
||||
lock sync.RWMutex
|
||||
node string
|
||||
containers []string
|
||||
client clientset.Interface
|
||||
buffers map[string][]*ContainerResourceUsage
|
||||
pollingInterval time.Duration
|
||||
stopCh chan struct{}
|
||||
}
|
||||
|
||||
func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
|
||||
buffers := make(map[string][]*ContainerResourceUsage)
|
||||
return &resourceCollector{
|
||||
node: nodeName,
|
||||
containers: containerNames,
|
||||
client: c,
|
||||
buffers: buffers,
|
||||
pollingInterval: pollingInterval,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts a goroutine to Poll the node every pollingInterval.
|
||||
func (r *resourceCollector) Start() {
|
||||
r.stopCh = make(chan struct{}, 1)
|
||||
// Keep the last observed stats for comparison.
|
||||
oldStats := make(map[string]*stats.ContainerStats)
|
||||
go wait.Until(func() { r.collectStats(oldStats) }, r.pollingInterval, r.stopCh)
|
||||
}
|
||||
|
||||
// Stop sends a signal to terminate the stats collecting goroutine.
|
||||
func (r *resourceCollector) Stop() {
|
||||
close(r.stopCh)
|
||||
}
|
||||
|
||||
// collectStats gets the latest stats from kubelet stats summary API, computes
|
||||
// the resource usage, and pushes it to the buffer.
|
||||
func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.ContainerStats) {
|
||||
summary, err := getNodeStatsSummary(r.client, r.node)
|
||||
if err != nil {
|
||||
Logf("Error getting node stats summary on %q, err: %v", r.node, err)
|
||||
return
|
||||
}
|
||||
cStatsMap := getSystemContainerStats(summary)
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
for _, name := range r.containers {
|
||||
cStats, ok := cStatsMap[name]
|
||||
if !ok {
|
||||
Logf("Missing info/stats for container %q on node %q", name, r.node)
|
||||
return
|
||||
}
|
||||
|
||||
if oldStats, ok := oldStatsMap[name]; ok {
|
||||
if oldStats.CPU.Time.Equal(&cStats.CPU.Time) {
|
||||
// No change -> skip this stat.
|
||||
continue
|
||||
}
|
||||
r.buffers[name] = append(r.buffers[name], computeContainerResourceUsage(name, oldStats, cStats))
|
||||
}
|
||||
// Update the old stats.
|
||||
oldStatsMap[name] = cStats
|
||||
}
|
||||
}
|
||||
|
||||
func (r *resourceCollector) GetLatest() (ResourceUsagePerContainer, error) {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
stats := make(ResourceUsagePerContainer)
|
||||
for _, name := range r.containers {
|
||||
contStats, ok := r.buffers[name]
|
||||
if !ok || len(contStats) == 0 {
|
||||
return nil, fmt.Errorf("Resource usage on node %q is not ready yet", r.node)
|
||||
}
|
||||
stats[name] = contStats[len(contStats)-1]
|
||||
}
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// Reset frees the stats and start over.
|
||||
func (r *resourceCollector) Reset() {
|
||||
r.lock.Lock()
|
||||
defer r.lock.Unlock()
|
||||
for _, name := range r.containers {
|
||||
r.buffers[name] = []*ContainerResourceUsage{}
|
||||
}
|
||||
}
|
||||
|
||||
type resourceUsageByCPU []*ContainerResourceUsage
|
||||
|
||||
func (r resourceUsageByCPU) Len() int { return len(r) }
|
||||
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
|
||||
func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
|
||||
|
||||
// The percentiles to report.
|
||||
var percentiles = [...]float64{0.05, 0.20, 0.50, 0.70, 0.90, 0.95, 0.99}
|
||||
|
||||
// GetBasicCPUStats returns the percentiles of the cpu usage in cores for
|
||||
// containerName. This method examines all data currently in the buffer.
|
||||
func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]float64 {
|
||||
r.lock.RLock()
|
||||
defer r.lock.RUnlock()
|
||||
result := make(map[float64]float64, len(percentiles))
|
||||
usages := r.buffers[containerName]
|
||||
sort.Sort(resourceUsageByCPU(usages))
|
||||
for _, q := range percentiles {
|
||||
index := int(float64(len(usages))*q) - 1
|
||||
if index < 0 {
|
||||
// We don't have enough data.
|
||||
result[q] = 0
|
||||
continue
|
||||
}
|
||||
result[q] = usages[index].CPUUsageInCores
|
||||
}
|
||||
return result
|
||||
}
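// Worked sketch (not part of the vendored file): the percentile lookup above indexes into the
// CPU-sorted buffer with index = len(usages)*q - 1. With 10 samples and q = 0.90 that picks
// index 8, i.e. the 9th-smallest sample; with fewer than 1/q samples the index is negative and
// the percentile is reported as 0. The sample values are hypothetical.
func examplePercentileIndex() {
	samples := []float64{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0} // already sorted
	q := 0.90
	index := int(float64(len(samples))*q) - 1                  // 10*0.9 - 1 = 8
	Logf("p%.0f CPU usage: %.2f cores", q*100, samples[index]) // 0.90
}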
|
||||
|
||||
// ResourceMonitor manages a resourceCollector per node.
|
||||
type ResourceMonitor struct {
|
||||
client clientset.Interface
|
||||
containers []string
|
||||
pollingInterval time.Duration
|
||||
collectors map[string]*resourceCollector
|
||||
}
|
||||
|
||||
func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
|
||||
return &ResourceMonitor{
|
||||
containers: containerNames,
|
||||
client: c,
|
||||
pollingInterval: pollingInterval,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) Start() {
|
||||
// It should be OK to monitor unschedulable Nodes
|
||||
nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
Failf("ResourceMonitor: unable to get list of nodes: %v", err)
|
||||
}
|
||||
r.collectors = make(map[string]*resourceCollector, 0)
|
||||
for _, node := range nodes.Items {
|
||||
collector := newResourceCollector(r.client, node.Name, r.containers, r.pollingInterval)
|
||||
r.collectors[node.Name] = collector
|
||||
collector.Start()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) Stop() {
|
||||
for _, collector := range r.collectors {
|
||||
collector.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) Reset() {
|
||||
for _, collector := range r.collectors {
|
||||
collector.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) LogLatest() {
|
||||
summary, err := r.GetLatest()
|
||||
if err != nil {
|
||||
Logf("%v", err)
|
||||
}
|
||||
Logf("%s", r.FormatResourceUsage(summary))
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
|
||||
summary := []string{}
|
||||
for node, usage := range s {
|
||||
summary = append(summary, formatResourceUsageStats(node, usage))
|
||||
}
|
||||
return strings.Join(summary, "\n")
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
|
||||
result := make(ResourceUsagePerNode)
|
||||
errs := []error{}
|
||||
for key, collector := range r.collectors {
|
||||
s, err := collector.GetLatest()
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
result[key] = s
|
||||
}
|
||||
return result, utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
|
||||
result := make(ResourceUsagePerNode)
|
||||
var masterUsage ResourceUsagePerContainer
|
||||
var nodesUsage []ResourceUsagePerContainer
|
||||
for node, usage := range usagePerNode {
|
||||
if strings.HasSuffix(node, "master") {
|
||||
masterUsage = usage
|
||||
} else {
|
||||
nodesUsage = append(nodesUsage, usage)
|
||||
}
|
||||
}
|
||||
nodeAvgUsage := make(ResourceUsagePerContainer)
|
||||
for _, nodeUsage := range nodesUsage {
|
||||
for c, usage := range nodeUsage {
|
||||
if _, found := nodeAvgUsage[c]; !found {
|
||||
nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name}
|
||||
}
|
||||
nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores
|
||||
nodeAvgUsage[c].MemoryUsageInBytes += usage.MemoryUsageInBytes
|
||||
nodeAvgUsage[c].MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes
|
||||
nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes
|
||||
}
|
||||
}
|
||||
for c := range nodeAvgUsage {
|
||||
nodeAvgUsage[c].CPUUsageInCores /= float64(len(nodesUsage))
|
||||
nodeAvgUsage[c].MemoryUsageInBytes /= uint64(len(nodesUsage))
|
||||
nodeAvgUsage[c].MemoryWorkingSetInBytes /= uint64(len(nodesUsage))
|
||||
nodeAvgUsage[c].MemoryRSSInBytes /= uint64(len(nodesUsage))
|
||||
}
|
||||
result["master"] = masterUsage
|
||||
result["node"] = nodeAvgUsage
|
||||
return result
|
||||
}
|
||||
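// Worked example (hypothetical numbers): the master's usage is passed through
// unchanged under the key "master", while all other nodes are averaged per
// container under the key "node". With two worker nodes whose "/kubelet"
// containers use 0.10 and 0.30 cores, result["node"]["/kubelet"].CPUUsageInCores
// is (0.10+0.30)/2 = 0.20.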
|
||||
// ContainersCPUSummary is indexed by the container name with each entry a
|
||||
// (percentile, value) map.
|
||||
type ContainersCPUSummary map[string]map[float64]float64
|
||||
|
||||
// NodesCPUSummary is indexed by the node name with each entry a
|
||||
// ContainersCPUSummary map.
|
||||
type NodesCPUSummary map[string]ContainersCPUSummary
|
||||
|
||||
func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
|
||||
// Example output for a node (the percentiles may differ):
|
||||
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
|
||||
// container 5th% 50th% 90th% 95th%
|
||||
// "/" 0.051 0.159 0.387 0.455
|
||||
// "/runtime 0.000 0.000 0.146 0.166
|
||||
// "/kubelet" 0.036 0.053 0.091 0.154
|
||||
// "/misc" 0.001 0.001 0.001 0.002
|
||||
var summaryStrings []string
|
||||
var header []string
|
||||
header = append(header, "container")
|
||||
for _, p := range percentiles {
|
||||
header = append(header, fmt.Sprintf("%.0fth%%", p*100))
|
||||
}
|
||||
for nodeName, containers := range summary {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
|
||||
for _, containerName := range TargetContainers() {
|
||||
var s []string
|
||||
s = append(s, fmt.Sprintf("%q", containerName))
|
||||
data, ok := containers[containerName]
|
||||
for _, p := range percentiles {
|
||||
value := "N/A"
|
||||
if ok {
|
||||
value = fmt.Sprintf("%.3f", data[p])
|
||||
}
|
||||
s = append(s, value)
|
||||
}
|
||||
fmt.Fprintf(w, "%s\n", strings.Join(s, "\t"))
|
||||
}
|
||||
w.Flush()
|
||||
summaryStrings = append(summaryStrings, fmt.Sprintf("CPU usage of containers on node %q:\n%s", nodeName, buf.String()))
|
||||
}
|
||||
return strings.Join(summaryStrings, "\n")
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) LogCPUSummary() {
|
||||
summary := r.GetCPUSummary()
|
||||
Logf("%s", r.FormatCPUSummary(summary))
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
|
||||
result := make(NodesCPUSummary)
|
||||
for nodeName, collector := range r.collectors {
|
||||
result[nodeName] = make(ContainersCPUSummary)
|
||||
for _, containerName := range TargetContainers() {
|
||||
data := collector.GetBasicCPUStats(containerName)
|
||||
result[nodeName][containerName] = data
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
|
||||
result := make(NodesCPUSummary)
|
||||
var masterSummary ContainersCPUSummary
|
||||
var nodesSummaries []ContainersCPUSummary
|
||||
for node, summary := range summaryPerNode {
|
||||
if strings.HasSuffix(node, "master") {
|
||||
masterSummary = summary
|
||||
} else {
|
||||
nodesSummaries = append(nodesSummaries, summary)
|
||||
}
|
||||
}
|
||||
|
||||
nodeAvgSummary := make(ContainersCPUSummary)
|
||||
for _, nodeSummary := range nodesSummaries {
|
||||
for c, summary := range nodeSummary {
|
||||
if _, found := nodeAvgSummary[c]; !found {
|
||||
nodeAvgSummary[c] = map[float64]float64{}
|
||||
}
|
||||
for perc, value := range summary {
|
||||
nodeAvgSummary[c][perc] += value
|
||||
}
|
||||
}
|
||||
}
|
||||
for c := range nodeAvgSummary {
|
||||
for perc := range nodeAvgSummary[c] {
|
||||
nodeAvgSummary[c][perc] /= float64(len(nodesSummaries))
|
||||
}
|
||||
}
|
||||
result["master"] = masterSummary
|
||||
result["node"] = nodeAvgSummary
|
||||
return result
|
||||
}
|
277
vendor/k8s.io/kubernetes/test/e2e/framework/log_size_monitoring.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package framework
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
const (
|
||||
// Minimal period between successive polls of log sizes from components
|
||||
pollingPeriod = 60 * time.Second
|
||||
workersNo = 5
|
||||
kubeletLogsPath = "/var/log/kubelet.log"
|
||||
kubeProxyLogsPath = "/var/log/kube-proxy.log"
|
||||
kubeAddonsLogsPath = "/var/log/kube-addons.log"
|
||||
kubeMasterAddonsLogsPath = "/var/log/kube-master-addons.log"
|
||||
apiServerLogsPath = "/var/log/kube-apiserver.log"
|
||||
controllersLogsPath = "/var/log/kube-controller-manager.log"
|
||||
schedulerLogsPath = "/var/log/kube-scheduler.log"
|
||||
)
|
||||
|
||||
var (
|
||||
nodeLogsToCheck = []string{kubeletLogsPath, kubeProxyLogsPath}
|
||||
masterLogsToCheck = []string{kubeletLogsPath, kubeAddonsLogsPath, kubeMasterAddonsLogsPath,
|
||||
apiServerLogsPath, controllersLogsPath, schedulerLogsPath}
|
||||
)
|
||||
|
||||
// TimestampedSize contains a size together with a time of measurement.
|
||||
type TimestampedSize struct {
|
||||
timestamp time.Time
|
||||
size int
|
||||
}
|
||||
|
||||
// LogSizeGatherer is a worker which grabs a WorkItem from the channel and does assigned work.
|
||||
type LogSizeGatherer struct {
|
||||
stopChannel chan bool
|
||||
data *LogsSizeData
|
||||
wg *sync.WaitGroup
|
||||
workChannel chan WorkItem
|
||||
}
|
||||
|
||||
// LogsSizeVerifier gathers data about log files sizes from master and node machines.
|
||||
// It oversees <workersNo> workers which do the gathering.
|
||||
type LogsSizeVerifier struct {
|
||||
client clientset.Interface
|
||||
stopChannel chan bool
|
||||
// data stores LogSizeData grouped per IP and log_path
|
||||
data *LogsSizeData
|
||||
masterAddress string
|
||||
nodeAddresses []string
|
||||
wg sync.WaitGroup
|
||||
workChannel chan WorkItem
|
||||
workers []*LogSizeGatherer
|
||||
}
|
||||
|
||||
type SingleLogSummary struct {
|
||||
AverageGenerationRate int
|
||||
NumberOfProbes int
|
||||
}
|
||||
|
||||
type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
|
||||
|
||||
// node -> file -> data
|
||||
type LogsSizeDataSummary map[string]map[string]SingleLogSummary
|
||||
|
||||
// TODO: make sure that we don't need locking here
|
||||
func (s *LogsSizeDataSummary) PrintHumanReadable() string {
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
|
||||
fmt.Fprintf(w, "host\tlog_file\taverage_rate (B/s)\tnumber_of_probes\n")
|
||||
for k, v := range *s {
|
||||
fmt.Fprintf(w, "%v\t\t\t\n", k)
|
||||
for path, data := range v {
|
||||
fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, data.AverageGenerationRate, data.NumberOfProbes)
|
||||
}
|
||||
}
|
||||
w.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (s *LogsSizeDataSummary) PrintJSON() string {
|
||||
return PrettyPrintJSON(*s)
|
||||
}
|
||||
|
||||
func (s *LogsSizeDataSummary) SummaryKind() string {
|
||||
return "LogSizeSummary"
|
||||
}
|
||||
|
||||
type LogsSizeData struct {
|
||||
data LogSizeDataTimeseries
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
// WorkItem is a command for a worker that contains an IP of machine from which we want to
|
||||
// gather data and paths to all files we're interested in.
|
||||
type WorkItem struct {
|
||||
ip string
|
||||
paths []string
|
||||
backoffMultiplier int
|
||||
}
|
||||
|
||||
func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
|
||||
data := make(LogSizeDataTimeseries)
|
||||
ips := append(nodeAddresses, masterAddress)
|
||||
for _, ip := range ips {
|
||||
data[ip] = make(map[string][]TimestampedSize)
|
||||
}
|
||||
return &LogsSizeData{
|
||||
data: data,
|
||||
lock: sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int) {
|
||||
d.lock.Lock()
|
||||
defer d.lock.Unlock()
|
||||
d.data[ip][path] = append(
|
||||
d.data[ip][path],
|
||||
TimestampedSize{
|
||||
timestamp: timestamp,
|
||||
size: size,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
|
||||
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
|
||||
nodeAddresses, err := NodeSSHHosts(c)
|
||||
ExpectNoError(err)
|
||||
masterAddress := GetMasterHost() + ":22"
|
||||
|
||||
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
|
||||
workers := make([]*LogSizeGatherer, workersNo)
|
||||
|
||||
verifier := &LogsSizeVerifier{
|
||||
client: c,
|
||||
stopChannel: stopChannel,
|
||||
data: prepareData(masterAddress, nodeAddresses),
|
||||
masterAddress: masterAddress,
|
||||
nodeAddresses: nodeAddresses,
|
||||
wg: sync.WaitGroup{},
|
||||
workChannel: workChannel,
|
||||
workers: workers,
|
||||
}
|
||||
verifier.wg.Add(workersNo)
|
||||
for i := 0; i < workersNo; i++ {
|
||||
workers[i] = &LogSizeGatherer{
|
||||
stopChannel: stopChannel,
|
||||
data: verifier.data,
|
||||
wg: &verifier.wg,
|
||||
workChannel: workChannel,
|
||||
}
|
||||
}
|
||||
return verifier
|
||||
}
|
||||
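// Illustrative usage sketch (hypothetical; `c` is assumed to be an initialized
// clientset.Interface):
//
//	stopCh := make(chan bool)
//	verifier := NewLogsVerifier(c, stopCh)
//	go verifier.Run()
//	// ... run the test ...
//	close(stopCh) // stops the workers and unblocks Run()
//	Logf("%s", verifier.GetSummary().PrintHumanReadable())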
|
||||
// GetSummary returns a summary (average generation rate and number of probes) of the data gathered by LogsSizeVerifier
|
||||
func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
|
||||
result := make(LogsSizeDataSummary)
|
||||
for k, v := range s.data.data {
|
||||
result[k] = make(map[string]SingleLogSummary)
|
||||
for path, data := range v {
|
||||
if len(data) > 1 {
|
||||
last := data[len(data)-1]
|
||||
first := data[0]
|
||||
rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
|
||||
result[k][path] = SingleLogSummary{
|
||||
AverageGenerationRate: rate,
|
||||
NumberOfProbes: len(data),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &result
|
||||
}
|
||||
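// Worked example (hypothetical numbers): AverageGenerationRate is the total
// byte growth divided (integer division) by the elapsed seconds between the
// first and last probe. If kubelet.log grows from 1,000,000 B to 1,600,000 B
// over 300 s of probing, the reported rate is (1600000-1000000)/300 = 2000 B/s.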
|
||||
// Run starts log size gathering. It starts a goroutine for every worker and then blocks until stopChannel is closed.
|
||||
func (v *LogsSizeVerifier) Run() {
|
||||
v.workChannel <- WorkItem{
|
||||
ip: v.masterAddress,
|
||||
paths: masterLogsToCheck,
|
||||
backoffMultiplier: 1,
|
||||
}
|
||||
for _, node := range v.nodeAddresses {
|
||||
v.workChannel <- WorkItem{
|
||||
ip: node,
|
||||
paths: nodeLogsToCheck,
|
||||
backoffMultiplier: 1,
|
||||
}
|
||||
}
|
||||
for _, worker := range v.workers {
|
||||
go worker.Run()
|
||||
}
|
||||
<-v.stopChannel
|
||||
v.wg.Wait()
|
||||
}
|
||||
|
||||
func (g *LogSizeGatherer) Run() {
|
||||
for g.Work() {
|
||||
}
|
||||
}
|
||||
|
||||
func (g *LogSizeGatherer) pushWorkItem(workItem WorkItem) {
|
||||
select {
|
||||
case <-time.After(time.Duration(workItem.backoffMultiplier) * pollingPeriod):
|
||||
g.workChannel <- workItem
|
||||
case <-g.stopChannel:
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Work does a single unit of work: tries to take out a WorkItem from the queue, ssh-es into a given machine,
|
||||
// gathers data, writes it to the shared <data> map, and creates a goroutine which reinserts the work item into
|
||||
// the queue with a <pollingPeriod> delay. Returns false if the worker should exit.
|
||||
func (g *LogSizeGatherer) Work() bool {
|
||||
var workItem WorkItem
|
||||
select {
|
||||
case <-g.stopChannel:
|
||||
g.wg.Done()
|
||||
return false
|
||||
case workItem = <-g.workChannel:
|
||||
}
|
||||
sshResult, err := SSH(
|
||||
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
|
||||
workItem.ip,
|
||||
TestContext.Provider,
|
||||
)
|
||||
if err != nil {
|
||||
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
|
||||
// In case of repeated error give up.
|
||||
if workItem.backoffMultiplier >= 128 {
|
||||
Logf("Failed to ssh to a node %v multiple times in a row. Giving up.", workItem.ip)
|
||||
g.wg.Done()
|
||||
return false
|
||||
}
|
||||
workItem.backoffMultiplier *= 2
|
||||
go g.pushWorkItem(workItem)
|
||||
return true
|
||||
}
|
||||
workItem.backoffMultiplier = 1
|
||||
results := strings.Split(sshResult.Stdout, " ")
|
||||
|
||||
now := time.Now()
|
||||
for i := 0; i+1 < len(results); i = i + 2 {
|
||||
path := results[i]
|
||||
size, err := strconv.Atoi(results[i+1])
|
||||
if err != nil {
|
||||
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
|
||||
continue
|
||||
}
|
||||
g.data.AddNewData(workItem.ip, path, now, size)
|
||||
}
|
||||
go g.pushWorkItem(workItem)
|
||||
return true
|
||||
}
|
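// Note on the flow above: the SSH command prints "<path> <size>" pairs on a
// single line, e.g. "/var/log/kubelet.log 123456 /var/log/kube-proxy.log 7890 ",
// which the loop consumes two fields at a time. On SSH failure the work item is
// re-queued with an exponentially growing delay (backoffMultiplier 1, 2, 4, ...
// multiplied by pollingPeriod) and the worker gives up once the multiplier has
// reached 128.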
44
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"api_server_metrics.go",
|
||||
"cluster_autoscaler_metrics.go",
|
||||
"controller_manager_metrics.go",
|
||||
"generic_metrics.go",
|
||||
"kubelet_metrics.go",
|
||||
"metrics_grabber.go",
|
||||
"scheduler_metrics.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/framework/metrics",
|
||||
deps = [
|
||||
"//pkg/apis/core:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//pkg/util/system:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
|
||||
"//vendor/github.com/prometheus/common/model:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
44
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/api_server_metrics.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type ApiServerMetrics Metrics
|
||||
|
||||
func (m *ApiServerMetrics) Equal(o ApiServerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewApiServerMetrics() ApiServerMetrics {
|
||||
result := NewMetrics()
|
||||
return ApiServerMetrics(result)
|
||||
}
|
||||
|
||||
func parseApiServerMetrics(data string) (ApiServerMetrics, error) {
|
||||
result := NewApiServerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return ApiServerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) getMetricsFromApiServer() (string, error) {
|
||||
rawOutput, err := g.client.CoreV1().RESTClient().Get().RequestURI("/metrics").Do().Raw()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(rawOutput), nil
|
||||
}
|
36
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/cluster_autoscaler_metrics.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type ClusterAutoscalerMetrics Metrics
|
||||
|
||||
func (m *ClusterAutoscalerMetrics) Equal(o ClusterAutoscalerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewClusterAutoscalerMetrics() ClusterAutoscalerMetrics {
|
||||
result := NewMetrics()
|
||||
return ClusterAutoscalerMetrics(result)
|
||||
}
|
||||
|
||||
func parseClusterAutoscalerMetrics(data string) (ClusterAutoscalerMetrics, error) {
|
||||
result := NewClusterAutoscalerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return ClusterAutoscalerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
36
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/controller_manager_metrics.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
type ControllerManagerMetrics Metrics
|
||||
|
||||
func (m *ControllerManagerMetrics) Equal(o ControllerManagerMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewControllerManagerMetrics() ControllerManagerMetrics {
|
||||
result := NewMetrics()
|
||||
return ControllerManagerMetrics(result)
|
||||
}
|
||||
|
||||
func parseControllerManagerMetrics(data string) (ControllerManagerMetrics, error) {
|
||||
result := NewControllerManagerMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return ControllerManagerMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
99
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/generic_metrics.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
type Metrics map[string]model.Samples
|
||||
|
||||
func (m *Metrics) Equal(o Metrics) bool {
|
||||
leftKeySet := []string{}
|
||||
rightKeySet := []string{}
|
||||
for k := range *m {
|
||||
leftKeySet = append(leftKeySet, k)
|
||||
}
|
||||
for k := range o {
|
||||
rightKeySet = append(rightKeySet, k)
|
||||
}
|
||||
if !reflect.DeepEqual(leftKeySet, rightKeySet) {
|
||||
return false
|
||||
}
|
||||
for _, k := range leftKeySet {
|
||||
if !(*m)[k].Equal(o[k]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func PrintSample(sample *model.Sample) string {
|
||||
buf := make([]string, 0)
|
||||
// Id is a VERY special label. For a 'normal' container it's useless, but it's necessary
|
||||
// for 'system' containers (e.g. /docker-daemon, /kubelet, etc.). We know if that's the
|
||||
// case by checking if there's a label "kubernetes_container_name" present. It's hacky
|
||||
// but it works...
|
||||
_, normalContainer := sample.Metric["kubernetes_container_name"]
|
||||
for k, v := range sample.Metric {
|
||||
if strings.HasPrefix(string(k), "__") {
|
||||
continue
|
||||
}
|
||||
|
||||
if string(k) == "id" && normalContainer {
|
||||
continue
|
||||
}
|
||||
buf = append(buf, fmt.Sprintf("%v=%v", string(k), v))
|
||||
}
|
||||
return fmt.Sprintf("[%v] = %v", strings.Join(buf, ","), sample.Value)
|
||||
}
|
||||
|
||||
func NewMetrics() Metrics {
|
||||
result := make(Metrics)
|
||||
return result
|
||||
}
|
||||
|
||||
func parseMetrics(data string, output *Metrics) error {
|
||||
dec := expfmt.NewDecoder(strings.NewReader(data), expfmt.FmtText)
|
||||
decoder := expfmt.SampleDecoder{
|
||||
Dec: dec,
|
||||
Opts: &expfmt.DecodeOptions{},
|
||||
}
|
||||
|
||||
for {
|
||||
var v model.Vector
|
||||
if err := decoder.Decode(&v); err != nil {
|
||||
if err == io.EOF {
|
||||
// Expected loop termination condition.
|
||||
return nil
|
||||
}
|
||||
glog.Warningf("Invalid Decode. Skipping.")
|
||||
continue
|
||||
}
|
||||
for _, metric := range v {
|
||||
name := string(metric.Metric[model.MetricNameLabel])
|
||||
(*output)[name] = append((*output)[name], metric)
|
||||
}
|
||||
}
|
||||
}
|
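// Illustrative sketch (hypothetical input held in `data`): parseMetrics consumes
// the Prometheus text exposition format, so input such as
//
//	# TYPE apiserver_request_count counter
//	apiserver_request_count{verb="GET",code="200"} 42
//
// ends up under output["apiserver_request_count"] as a single model.Sample with
// value 42 and the verb/code labels preserved.
//
//	m := NewMetrics()
//	if err := parseMetrics(data, &m); err != nil { /* handle the error */ }
//	samples := m["apiserver_request_count"]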
85
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/kubelet_metrics.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type KubeletMetrics Metrics
|
||||
|
||||
func (m *KubeletMetrics) Equal(o KubeletMetrics) bool {
|
||||
return (*Metrics)(m).Equal(Metrics(o))
|
||||
}
|
||||
|
||||
func NewKubeletMetrics() KubeletMetrics {
|
||||
result := NewMetrics()
|
||||
return KubeletMetrics(result)
|
||||
}
|
||||
|
||||
// GrabKubeletMetricsWithoutProxy retrieves metrics from the kubelet on the given node using a simple GET over HTTP.
|
||||
// Currently only used in integration tests.
|
||||
func GrabKubeletMetricsWithoutProxy(nodeName string) (KubeletMetrics, error) {
|
||||
metricsEndpoint := "http://%s/metrics"
|
||||
resp, err := http.Get(fmt.Sprintf(metricsEndpoint, nodeName))
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return parseKubeletMetrics(string(body))
|
||||
}
|
||||
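// Illustrative usage sketch (hypothetical address and metric name): the node
// name is substituted directly into "http://%s/metrics", so it typically
// carries the kubelet's read-only port.
//
//	km, err := GrabKubeletMetricsWithoutProxy("10.0.0.5:10255")
//	if err != nil {
//		// handle the error
//	}
//	_ = km["kubelet_running_pod_count"]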
|
||||
func parseKubeletMetrics(data string) (KubeletMetrics, error) {
|
||||
result := NewKubeletMetrics()
|
||||
if err := parseMetrics(data, (*Metrics)(&result)); err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) getMetricsFromNode(nodeName string, kubeletPort int) (string, error) {
|
||||
// There's a problem with timing out during proxy. Wrapping this in a goroutine to prevent deadlock.
|
||||
// A hanging goroutine will be leaked.
|
||||
finished := make(chan struct{})
|
||||
var err error
|
||||
var rawOutput []byte
|
||||
go func() {
|
||||
rawOutput, err = g.client.CoreV1().RESTClient().Get().
|
||||
Resource("nodes").
|
||||
SubResource("proxy").
|
||||
Name(fmt.Sprintf("%v:%v", nodeName, kubeletPort)).
|
||||
Suffix("metrics").
|
||||
Do().Raw()
|
||||
finished <- struct{}{}
|
||||
}()
|
||||
select {
|
||||
case <-time.After(ProxyTimeout):
|
||||
return "", fmt.Errorf("Timed out when waiting for proxy to gather metrics from %v", nodeName)
|
||||
case <-finished:
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(rawOutput), nil
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff.