Fresh dep ensure

Mike Cronce
2018-11-26 13:23:56 -05:00
parent 93cb8a04d7
commit 407478ab9a
9016 changed files with 551394 additions and 279685 deletions

View File

@ -18,6 +18,9 @@ go_test(
"//test/e2e/autoscaling:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/framework/viperconfig:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/instrumentation:go_default_library",
"//test/e2e/kubectl:go_default_library",
"//test/e2e/lifecycle:go_default_library",
@ -43,33 +46,33 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/version:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/framework/providers/aws:go_default_library",
"//test/e2e/framework/providers/azure:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",
"//test/e2e/framework/providers/kubemark:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/plugin/pkg/client/auth:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -11,6 +11,7 @@ go_library(
"aggregator.go",
"certs.go",
"chunking.go",
"crd_conversion_webhook.go",
"crd_watch.go",
"custom_resource_definition.go",
"etcd_failure.go",
@ -26,9 +27,49 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/apimachinery",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/apis/rbac/v1beta1:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/util/version:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/authorization/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
"//test/e2e/apps:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
@ -37,44 +78,6 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
"//vendor/k8s.io/api/admissionregistration/v1beta1:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/authorization/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/user:go_default_library",
"//vendor/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1:go_default_library",
],
)

View File

@ -32,14 +32,13 @@ import (
unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/discovery"
clientset "k8s.io/client-go/kubernetes"
apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
rbacapi "k8s.io/kubernetes/pkg/apis/rbac"
utilversion "k8s.io/kubernetes/pkg/util/version"
rbacv1beta1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
@ -47,7 +46,7 @@ import (
. "github.com/onsi/ginkgo"
)
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.7.0")
var serverAggregatorVersion = utilversion.MustParseSemantic("v1.10.0")
var _ = SIGDescribe("Aggregator", func() {
var ns string
@ -73,12 +72,12 @@ var _ = SIGDescribe("Aggregator", func() {
aggrclient = f.AggregatorClient
})
It("Should be able to support the 1.7 Sample API Server using the current Aggregator", func() {
It("Should be able to support the 1.10 Sample API Server using the current Aggregator", func() {
// Make sure the relevant provider supports Aggregator
framework.SkipUnlessServerVersionGTE(serverAggregatorVersion, f.ClientSet.Discovery())
framework.SkipUnlessProviderIs("gce", "gke")
// Testing a 1.7 version of the sample-apiserver
// Testing a 1.10 version of the sample-apiserver
TestSampleAPIServer(f, imageutils.GetE2EImage(imageutils.APIServer))
})
})
@ -87,34 +86,29 @@ func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientse
// delete the APIService first to avoid causing discovery errors
_ = aggrclient.ApiregistrationV1beta1().APIServices().Delete("v1alpha1.wardle.k8s.io", nil)
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver", nil)
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver-deployment", nil)
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
_ = client.RbacV1beta1().RoleBindings("kube-system").Delete("wardler-auth-reader", nil)
_ = client.RbacV1beta1().ClusterRoles().Delete("wardler", nil)
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":anonymous", nil)
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":auth-delegator", nil)
_ = client.RbacV1beta1().ClusterRoles().Delete("sample-apiserver-reader", nil)
_ = client.RbacV1beta1().ClusterRoleBindings().Delete("wardler:"+namespace+":sample-apiserver-reader", nil)
}
// A basic test if the sample-apiserver code from 1.7 and compiled against 1.7
// A basic test if the sample-apiserver code from 1.10 and compiled against 1.10
// will work on the current Aggregator/API-Server.
func TestSampleAPIServer(f *framework.Framework, image string) {
By("Registering the sample API server.")
client := f.ClientSet
restClient := client.Discovery().RESTClient()
iclient := f.InternalClientset
aggrclient := f.AggregatorClient
namespace := f.Namespace.Name
context := setupServerCert(namespace, "sample-api")
if framework.ProviderIs("gke") {
// kubectl create clusterrolebinding user-cluster-admin-binding --clusterrole=cluster-admin --user=user@domain.com
authenticated := rbacv1beta1.Subject{Kind: rbacv1beta1.GroupKind, Name: user.AllAuthenticated}
framework.BindClusterRole(client.RbacV1beta1(), "cluster-admin", namespace, authenticated)
}
// kubectl create -f namespace.yaml
// NOTE: aggregated apis should generally be set up in there own namespace. As the test framework is setting up a new namespace, we are just using that.
// NOTE: aggregated apis should generally be set up in their own namespace. As the test framework is setting up a new namespace, we are just using that.
// kubectl create -f secret.yaml
secretName := "sample-apiserver-secret"
@ -131,9 +125,61 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
_, err := client.CoreV1().Secrets(namespace).Create(secret)
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// kubectl create -f clusterrole.yaml
_, err = client.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
// role for listing ValidatingWebhookConfiguration/MutatingWebhookConfiguration/Namespaces
ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"},
Rules: []rbacv1beta1.PolicyRule{
rbacv1beta1helpers.NewRule("list").Groups("").Resources("namespaces").RuleOrDie(),
rbacv1beta1helpers.NewRule("list").Groups("admissionregistration.k8s.io").Resources("*").RuleOrDie(),
},
})
framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader")
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":sample-apiserver-reader",
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "sample-apiserver-reader",
},
Subjects: []rbacv1beta1.Subject{
{
APIGroup: "",
Kind: "ServiceAccount",
Name: "default",
Namespace: namespace,
},
},
})
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader")
// kubectl create -f authDelegator.yaml
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":auth-delegator",
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
Subjects: []rbacv1beta1.Subject{
{
APIGroup: "",
Kind: "ServiceAccount",
Name: "default",
Namespace: namespace,
},
},
})
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":auth-delegator")
// kubectl create -f deploy.yaml
deploymentName := "sample-apiserver-deployment"
etcdImage := "quay.io/coreos/etcd:v3.2.18"
etcdImage := "quay.io/coreos/etcd:v3.2.24"
podLabels := map[string]string{"app": "sample-apiserver", "apiserver": "true"}
replicas := int32(1)
zero := int64(0)
@ -230,48 +276,6 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
_, err = client.CoreV1().ServiceAccounts(namespace).Create(sa)
framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f authDelegator.yaml
_, err = client.RbacV1beta1().ClusterRoleBindings().Create(&rbacv1beta1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":anonymous",
},
RoleRef: rbacv1beta1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "wardler",
},
Subjects: []rbacv1beta1.Subject{
{
APIGroup: "rbac.authorization.k8s.io",
Kind: "User",
Name: namespace + ":anonymous",
},
},
})
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":anonymous")
// kubectl create -f role.yaml
resourceRule, err := rbacapi.NewRule("create", "delete", "deletecollection", "get", "list", "patch", "update", "watch").Groups("wardle.k8s.io").Resources("flunders").Rule()
framework.ExpectNoError(err, "creating cluster resource rule")
urlRule, err := rbacapi.NewRule("get").URLs("*").Rule()
framework.ExpectNoError(err, "creating cluster url rule")
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
roleLabels := map[string]string{"kubernetes.io/bootstrapping": "wardle-default"}
role := rbacapi.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler",
Labels: roleLabels,
},
Rules: []rbacapi.PolicyRule{resourceRule, urlRule},
}
_, err = iclient.Rbac().ClusterRoles().Create(&role)
if err != nil {
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "creating cluster role wardler - may not have permissions")
// kubectl create -f auth-reader.yaml
_, err = client.RbacV1beta1().RoleBindings("kube-system").Create(&rbacv1beta1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
@ -297,7 +301,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
// Wait for the extension apiserver to be up and healthy
// kubectl get deployments -n <aggregated-api-namespace> && status == Running
// NOTE: aggregated apis should generally be set up in there own namespace (<aggregated-api-namespace>). As the test framework
// NOTE: aggregated apis should generally be set up in their own namespace (<aggregated-api-namespace>). As the test framework
// is setting up a new namespace, we are just using that.
err = framework.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
@ -436,7 +440,7 @@ func TestSampleAPIServer(f *framework.Framework, image string) {
unstruct := &unstructuredv1.Unstructured{}
err = unstruct.UnmarshalJSON(jsonFlunder)
framework.ExpectNoError(err, "unmarshalling test-flunder as unstructured for create using dynamic client")
unstruct, err = dynamicClient.Create(unstruct)
unstruct, err = dynamicClient.Create(unstruct, metav1.CreateOptions{})
framework.ExpectNoError(err, "listing flunders using dynamic client")
// kubectl get flunders
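For reference on the dynamic-client change above (Create now takes an explicit metav1.CreateOptions), a minimal sketch of creating a flunder through the vendored dynamic client might look like the following; it is not part of this commit, and the kubeconfig handling and object content are illustrative assumptions.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

// createFlunder creates a sample wardle.k8s.io Flunder object in the given
// namespace using the dynamic client, mirroring the call pattern in the test.
func createFlunder(kubeconfig, namespace string) (*unstructured.Unstructured, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	gvr := schema.GroupVersionResource{Group: "wardle.k8s.io", Version: "v1alpha1", Resource: "flunders"}
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "wardle.k8s.io/v1alpha1",
		"kind":       "Flunder",
		"metadata":   map[string]interface{}{"name": "test-flunder", "namespace": namespace},
	}}
	// In the client-go version vendored by this commit, Create takes the
	// object plus CreateOptions (and optional subresources).
	return client.Resource(gvr).Namespace(namespace).Create(obj, metav1.CreateOptions{})
}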

View File

@ -17,14 +17,20 @@ limitations under the License.
package apimachinery
import (
"context"
"fmt"
"math/rand"
"reflect"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
)
@ -34,13 +40,12 @@ const numberOfTotalResources = 400
var _ = SIGDescribe("Servers with support for API chunking", func() {
f := framework.NewDefaultFramework("chunking")
It("should return chunks of results for list calls", func() {
BeforeEach(func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(20, numberOfTotalResources, func(i int) {
workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
@ -61,7 +66,12 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
}
Fail("Unable to create template %d, exiting", i)
})
})
It("should return chunks of results for list calls", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving those results in paged fashion several times")
for i := 0; i < 3; i++ {
opts := metav1.ListOptions{}
@ -70,20 +80,14 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
for {
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred())
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(list.Items) > int(opts.Limit) {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
if len(lastRV) == 0 {
lastRV = list.ResourceVersion
}
if lastRV != list.ResourceVersion {
Expect(list.ResourceVersion).To(Equal(lastRV))
}
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
@ -97,8 +101,82 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
}
By("retrieving those results all at once")
list, err := client.List(metav1.ListOptions{Limit: numberOfTotalResources + 1})
Expect(err).ToNot(HaveOccurred())
opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
Expect(list.Items).To(HaveLen(numberOfTotalResources))
})
It("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", func() {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
By("retrieving the first page")
oneTenth := int64(numberOfTotalResources / 10)
opts := metav1.ListOptions{}
opts.Limit = oneTenth
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
firstToken := list.Continue
firstRV := list.ResourceVersion
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
By("retrieving the second page until the token expires")
opts.Continue = firstToken
var inconsistentToken string
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
_, err := client.List(opts)
if err == nil {
framework.Logf("Token %s has not expired yet", firstToken)
return false, nil
}
if err != nil && !errors.IsResourceExpired(err) {
return false, err
}
framework.Logf("got error %s", err)
status, ok := err.(errors.APIStatus)
if !ok {
return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
}
inconsistentToken = status.Status().ListMeta.Continue
if len(inconsistentToken) == 0 {
return false, fmt.Errorf("expect non empty continue token")
}
framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
return true, nil
})
By("retrieving the second page again with the token received with the error message")
opts.Continue = inconsistentToken
list, err = client.List(opts)
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
Expect(list.ResourceVersion).ToNot(Equal(firstRV))
Expect(len(list.Items)).To(BeNumerically("==", opts.Limit))
found := oneTenth
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
By("retrieving all remaining pages")
opts.Continue = list.Continue
lastRV := list.ResourceVersion
for {
list, err := client.List(opts)
Expect(err).ToNot(HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
Expect(len(list.Items)).To(BeNumerically("<=", opts.Limit))
Expect(list.ResourceVersion).To(Equal(lastRV))
for _, item := range list.Items {
Expect(item.Name).To(Equal(fmt.Sprintf("template-%04d", found)))
found++
}
if len(list.Continue) == 0 {
break
}
opts.Continue = list.Continue
}
Expect(found).To(BeNumerically("==", numberOfTotalResources))
})
})
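The chunking tests above drive the Limit/Continue paging API, including the inconsistent continue token the API server returns once the original resource version has been compacted away. A minimal consumer-side sketch of that pattern, assuming the List signature vendored by this commit (no context argument) and a hypothetical helper name:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// listAllPodTemplates pages through PodTemplates in the given namespace and,
// if a continue token expires, resumes from the inconsistent token returned
// with the ResourceExpired error.
func listAllPodTemplates(c clientset.Interface, ns string, pageSize int64) ([]string, error) {
	var names []string
	opts := metav1.ListOptions{Limit: pageSize}
	for {
		list, err := c.CoreV1().PodTemplates(ns).List(opts)
		if err != nil {
			if !errors.IsResourceExpired(err) {
				return nil, err
			}
			// The token outlived the compaction window; the error carries a
			// new token that continues from the last key at a newer version.
			status, ok := err.(errors.APIStatus)
			if !ok {
				return nil, fmt.Errorf("expected APIStatus error, got %v", err)
			}
			opts.Continue = status.Status().ListMeta.Continue
			continue
		}
		for _, item := range list.Items {
			names = append(names, item.Name)
		}
		if list.Continue == "" {
			return names, nil
		}
		opts.Continue = list.Continue
	}
}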

View File

@ -0,0 +1,396 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
"time"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/test/integration"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
const (
secretCRDName = "sample-custom-resource-conversion-webhook-secret"
deploymentCRDName = "sample-crd-conversion-webhook-deployment"
serviceCRDName = "e2e-test-crd-conversion-webhook"
roleBindingCRDName = "crd-conversion-webhook-auth-reader"
)
var serverCRDConversionWebhookVersion = utilversion.MustParseSemantic("v1.13.0-alpha")
var apiVersions = []v1beta1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
},
{
Name: "v2",
Served: true,
Storage: false,
},
}
var alternativeApiVersions = []v1beta1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: false,
},
{
Name: "v2",
Served: true,
Storage: true,
},
}
var _ = SIGDescribe("CustomResourceConversionWebhook [Feature:CustomResourceWebhookConversion]", func() {
var context *certContext
f := framework.NewDefaultFramework("crd-webhook")
var client clientset.Interface
var namespaceName string
BeforeEach(func() {
client = f.ClientSet
namespaceName = f.Namespace.Name
// Make sure the relevant provider supports conversion webhook
framework.SkipUnlessServerVersionGTE(serverCRDConversionWebhookVersion, f.ClientSet.Discovery())
By("Setting up server cert")
context = setupServerCert(f.Namespace.Name, serviceCRDName)
createAuthReaderRoleBindingForCRDConversion(f, f.Namespace.Name)
deployCustomResourceWebhookAndService(f, imageutils.GetE2EImage(imageutils.CRDConversionWebhook), context)
})
AfterEach(func() {
cleanCRDWebhookTest(client, namespaceName)
})
It("Should be able to convert from CR v1 to CR v2", func() {
testcrd, err := framework.CreateMultiVersionTestCRD(f, "stable.example.com", apiVersions,
&v1beta1.WebhookClientConfig{
CABundle: context.signingCert,
Service: &v1beta1.ServiceReference{
Namespace: f.Namespace.Name,
Name: serviceCRDName,
Path: strPtr("/crdconvert"),
}})
if err != nil {
return
}
defer testcrd.CleanUp()
testCustomResourceConversionWebhook(f, testcrd.Crd, testcrd.DynamicClients)
})
It("Should be able to convert a non homogeneous list of CRs", func() {
testcrd, err := framework.CreateMultiVersionTestCRD(f, "stable.example.com", apiVersions,
&v1beta1.WebhookClientConfig{
CABundle: context.signingCert,
Service: &v1beta1.ServiceReference{
Namespace: f.Namespace.Name,
Name: serviceCRDName,
Path: strPtr("/crdconvert"),
}})
if err != nil {
return
}
defer testcrd.CleanUp()
testCRListConversion(f, testcrd)
})
})
func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) {
_ = client.CoreV1().Services(namespaceName).Delete(serviceCRDName, nil)
_ = client.AppsV1().Deployments(namespaceName).Delete(deploymentCRDName, nil)
_ = client.CoreV1().Secrets(namespaceName).Delete(secretCRDName, nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingCRDName, nil)
}
func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) {
By("Create role binding to let cr conversion webhook read extension-apiserver-authentication")
client := f.ClientSet
// Create the role binding to allow the webhook to read the extension-apiserver-authentication configmap
_, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleBindingCRDName,
},
RoleRef: rbacv1.RoleRef{
APIGroup: "",
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
// Webhook uses the default service account.
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "default",
Namespace: namespace,
},
},
})
if err != nil && errors.IsAlreadyExists(err) {
framework.Logf("role binding %s already exists", roleBindingCRDName)
} else {
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
}
}
func deployCustomResourceWebhookAndService(f *framework.Framework, image string, context *certContext) {
By("Deploying the custom resource conversion webhook pod")
client := f.ClientSet
// Creating the secret that contains the webhook's cert.
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretCRDName,
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
"tls.crt": context.cert,
"tls.key": context.key,
},
}
namespace := f.Namespace.Name
_, err := client.CoreV1().Secrets(namespace).Create(secret)
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// Create the deployment of the webhook
podLabels := map[string]string{"app": "sample-crd-conversion-webhook", "crd-webhook": "true"}
replicas := int32(1)
zero := int64(0)
mounts := []v1.VolumeMount{
{
Name: "crd-conversion-webhook-certs",
ReadOnly: true,
MountPath: "/webhook.local.config/certificates",
},
}
volumes := []v1.Volume{
{
Name: "crd-conversion-webhook-certs",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{SecretName: secretCRDName},
},
},
}
containers := []v1.Container{
{
Name: "sample-crd-conversion-webhook",
VolumeMounts: mounts,
Args: []string{
"--tls-cert-file=/webhook.local.config/certificates/tls.crt",
"--tls-private-key-file=/webhook.local.config/certificates/tls.key",
"--alsologtostderr",
"-v=4",
"2>&1",
},
Image: image,
},
}
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: deploymentCRDName,
Labels: podLabels,
},
Spec: apps.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: podLabels,
},
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: podLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: containers,
Volumes: volumes,
},
},
},
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
By("Wait for the deployment to be ready")
err = framework.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
err = framework.WaitForDeploymentComplete(client, deployment)
framework.ExpectNoError(err, "waiting for the deployment status valid", image, deploymentCRDName, namespace)
By("Deploying the webhook service")
serviceLabels := map[string]string{"crd-webhook": "true"}
service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: serviceCRDName,
Labels: map[string]string{"test": "crd-webhook"},
},
Spec: v1.ServiceSpec{
Selector: serviceLabels,
Ports: []v1.ServicePort{
{
Protocol: "TCP",
Port: 443,
TargetPort: intstr.FromInt(443),
},
},
},
}
_, err = client.CoreV1().Services(namespace).Create(service)
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace)
By("Verifying the service has paired with the endpoint")
err = framework.WaitForServiceEndpointsNum(client, namespace, serviceCRDName, 1, 1*time.Second, 30*time.Second)
framework.ExpectNoError(err, "waiting for service %s/%s have %d endpoint", namespace, serviceCRDName, 1)
}
func verifyV1Object(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, obj *unstructured.Unstructured) {
Expect(obj.GetAPIVersion()).To(BeEquivalentTo(crd.Spec.Group + "/v1"))
hostPort, exists := obj.Object["hostPort"]
Expect(exists).To(BeTrue())
Expect(hostPort).To(BeEquivalentTo("localhost:8080"))
_, hostExists := obj.Object["host"]
Expect(hostExists).To(BeFalse())
_, portExists := obj.Object["port"]
Expect(portExists).To(BeFalse())
}
func verifyV2Object(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, obj *unstructured.Unstructured) {
Expect(obj.GetAPIVersion()).To(BeEquivalentTo(crd.Spec.Group + "/v2"))
_, hostPortExists := obj.Object["hostPort"]
Expect(hostPortExists).To(BeFalse())
host, hostExists := obj.Object["host"]
Expect(hostExists).To(BeTrue())
Expect(host).To(BeEquivalentTo("localhost"))
port, portExists := obj.Object["port"]
Expect(portExists).To(BeTrue())
Expect(port).To(BeEquivalentTo("8080"))
}
func testCustomResourceConversionWebhook(f *framework.Framework, crd *v1beta1.CustomResourceDefinition, customResourceClients map[string]dynamic.ResourceInterface) {
name := "cr-instance-1"
By("Creating a v1 custom resource")
crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/v1",
"metadata": map[string]interface{}{
"name": name,
"namespace": f.Namespace.Name,
},
"hostPort": "localhost:8080",
},
}
_, err := customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
Expect(err).To(BeNil())
By("v2 custom resource should be converted")
v2crd, err := customResourceClients["v2"].Get(name, metav1.GetOptions{})
verifyV2Object(f, crd, v2crd)
}
func testCRListConversion(f *framework.Framework, testCrd *framework.TestCrd) {
crd := testCrd.Crd
customResourceClients := testCrd.DynamicClients
name1 := "cr-instance-1"
name2 := "cr-instance-2"
By("Creating a v1 custom resource")
crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/v1",
"metadata": map[string]interface{}{
"name": name1,
"namespace": f.Namespace.Name,
},
"hostPort": "localhost:8080",
},
}
_, err := customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
Expect(err).To(BeNil())
// Now cr-instance-1 is stored as v1. Let's change the storage version.
crd, err = integration.UpdateCustomResourceDefinitionWithRetry(testCrd.ApiExtensionClient, crd.Name, func(c *v1beta1.CustomResourceDefinition) {
c.Spec.Versions = alternativeApiVersions
})
Expect(err).To(BeNil())
By("Create a v2 custom resource")
crInstance = &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/v1",
"metadata": map[string]interface{}{
"name": name2,
"namespace": f.Namespace.Name,
},
"hostPort": "localhost:8080",
},
}
// After changing a CRD, the resources for its versions will be re-created, which can result in a
// cancelled connection (e.g. "grpc connection closed" or "context canceled").
// Just retrying fixes that.
for i := 0; i < 5; i++ {
_, err = customResourceClients["v1"].Create(crInstance, metav1.CreateOptions{})
if err == nil {
break
}
}
Expect(err).To(BeNil())
// Now that we have a v1 and v2 object, both list operation in v1 and v2 should work as expected.
By("List CRs in v1")
list, err := customResourceClients["v1"].List(metav1.ListOptions{})
Expect(err).To(BeNil())
Expect(len(list.Items)).To(BeIdenticalTo(2))
Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(BeTrue())
verifyV1Object(f, crd, &list.Items[0])
verifyV1Object(f, crd, &list.Items[1])
By("List CRs in v2")
list, err = customResourceClients["v2"].List(metav1.ListOptions{})
Expect(err).To(BeNil())
Expect(len(list.Items)).To(BeIdenticalTo(2))
Expect((list.Items[0].GetName() == name1 && list.Items[1].GetName() == name2) ||
(list.Items[0].GetName() == name2 && list.Items[1].GetName() == name1)).To(BeTrue())
verifyV2Object(f, crd, &list.Items[0])
verifyV2Object(f, crd, &list.Items[1])
}
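The conversion-webhook test above delegates CRD creation to framework.CreateMultiVersionTestCRD, handing it the WebhookClientConfig. A hedged sketch of how such a multi-version CRD is typically declared directly against the apiextensions v1beta1 types (not part of this commit; the group, names, and the Conversion field layout are assumptions for illustration):

package example

import (
	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newConvertedCRD declares a CRD with two served versions and a conversion
// webhook reachable through the in-cluster service created by the test.
func newConvertedCRD(namespace string, caBundle []byte) *v1beta1.CustomResourceDefinition {
	path := "/crdconvert"
	return &v1beta1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{Name: "widgets.stable.example.com"},
		Spec: v1beta1.CustomResourceDefinitionSpec{
			Group: "stable.example.com",
			Scope: v1beta1.NamespaceScoped,
			Names: v1beta1.CustomResourceDefinitionNames{
				Plural:   "widgets",
				Singular: "widget",
				Kind:     "Widget",
				ListKind: "WidgetList",
			},
			// Only one version may be the storage version at any time.
			Versions: []v1beta1.CustomResourceDefinitionVersion{
				{Name: "v1", Served: true, Storage: true},
				{Name: "v2", Served: true, Storage: false},
			},
			Conversion: &v1beta1.CustomResourceConversion{
				Strategy: v1beta1.WebhookConverter,
				WebhookClientConfig: &v1beta1.WebhookClientConfig{
					CABundle: caBundle,
					Service: &v1beta1.ServiceReference{
						Namespace: namespace,
						Name:      "e2e-test-crd-conversion-webhook", // matches serviceCRDName above
						Path:      &path,
					},
				},
			},
		},
	}
}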

View File

@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -63,14 +63,14 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
noxuDefinition := testserver.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = testserver.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(noxuDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
@ -80,35 +80,35 @@ var _ = SIGDescribe("CustomResourceDefinition Watch", func() {
noxuResourceClient := newNamespacedCustomResourceClient(ns, f.DynamicClient, noxuDefinition)
watchA, err := watchCRWithName(noxuResourceClient, watchCRNameA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameA)
watchB, err := watchCRWithName(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to watch custom resource: %s", watchCRNameB)
testCrA := testserver.NewNoxuInstance(ns, watchCRNameA)
testCrB := testserver.NewNoxuInstance(ns, watchCRNameB)
testCrA := fixtures.NewNoxuInstance(ns, watchCRNameA)
testCrB := fixtures.NewNoxuInstance(ns, watchCRNameB)
By("Creating first CR ")
testCrA, err = instantiateCustomResource(testCrA, noxuResourceClient, noxuDefinition)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrA)
expectEvent(watchA, watch.Added, testCrA)
expectNoEvent(watchB, watch.Added, testCrA)
By("Creating second CR")
testCrB, err = instantiateCustomResource(testCrB, noxuResourceClient, noxuDefinition)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to instantiate custom resource: %+v", testCrB)
expectEvent(watchB, watch.Added, testCrB)
expectNoEvent(watchA, watch.Added, testCrB)
By("Deleting first CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameA)
expectEvent(watchA, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)
By("Deleting second CR")
err = deleteCustomResource(noxuResourceClient, watchCRNameB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete custom resource: %s", watchCRNameB)
expectEvent(watchB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
})
@ -125,7 +125,7 @@ func watchCRWithName(crdResourceClient dynamic.ResourceInterface, name string) (
}
func instantiateCustomResource(instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) {
createdInstance, err := client.Create(instanceToCreate)
createdInstance, err := client.Create(instanceToCreate, metav1.CreateOptions{})
if err != nil {
return nil, err
}
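The per-name watches set up above (watchCRWithName) are ordinary dynamic-client watches scoped with a field selector. A small sketch of that pattern, assuming a metadata.name selector is how the helper narrows the stream (the helper body is not shown in this diff):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic"
)

// watchByName opens a watch that only delivers events for the custom
// resource with the given name.
func watchByName(client dynamic.ResourceInterface, name string) (watch.Interface, error) {
	timeout := int64(600)
	return client.Watch(metav1.ListOptions{
		FieldSelector:  fields.OneTermEqualSelector("metadata.name", name).String(),
		TimeoutSeconds: &timeout,
	})
}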

View File

@ -19,8 +19,8 @@ package apimachinery
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@ -34,9 +34,9 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
Context("Simple CustomResourceDefinition", func() {
/*
Testname: crd-creation-test
Description: Create a random Custom Resource Definition and make sure
the API returns success.
Release : v1.9
Testname: Custom Resource Definition, create
Description: Create an API extension client, define a random custom resource definition, and create the custom resource. The API server MUST be able to create the custom resource.
*/
framework.ConformanceIt("creating/deleting custom resource definition objects works ", func() {
@ -52,16 +52,16 @@ var _ = SIGDescribe("CustomResourceDefinition resources", func() {
framework.Failf("failed to initialize apiExtensionClient: %v", err)
}
randomDefinition := testserver.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
randomDefinition := fixtures.NewRandomNameCustomResourceDefinition(v1beta1.ClusterScoped)
//create CRD and waits for the resource to be recognized and available.
randomDefinition, err = testserver.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
randomDefinition, err = fixtures.CreateNewCustomResourceDefinition(randomDefinition, apiExtensionClient, f.DynamicClient)
if err != nil {
framework.Failf("failed to create CustomResourceDefinition: %v", err)
}
defer func() {
err = testserver.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
err = fixtures.DeleteCustomResourceDefinition(randomDefinition, apiExtensionClient)
if err != nil {
framework.Failf("failed to delete CustomResourceDefinition: %v", err)
}
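The fixtures helpers used above wrap direct calls to the apiextensions clientset (waiting for the definition to be served before returning). A bare-bones sketch of the underlying create/delete calls, assuming the clientset signatures vendored by this commit (no context argument):

package example

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

// createAndDeleteCRD creates the given CustomResourceDefinition and then
// deletes it again; real callers should also wait for the Established
// condition before using the new API group.
func createAndDeleteCRD(config *rest.Config, crd *apiextensionsv1beta1.CustomResourceDefinition) error {
	client, err := clientset.NewForConfig(config)
	if err != nil {
		return err
	}
	if _, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
		return err
	}
	return client.ApiextensionsV1beta1().CustomResourceDefinitions().Delete(crd.Name, &metav1.DeleteOptions{})
}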

View File

@ -94,8 +94,9 @@ func doEtcdFailure(failCommand, fixCommand string) {
}
func masterExec(cmd string) {
result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
host := framework.GetMasterHost() + ":22"
result, err := framework.SSH(cmd, host, framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred(), "failed to SSH to host %s on provider %s and run command: %q", host, framework.TestContext.Provider, cmd)
if result.Code != 0 {
framework.LogSSHResult(result)
framework.Failf("master exec command returned non-zero")
@ -120,7 +121,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
}
framework.Logf("apiserver has recovered")
return true, nil
@ -130,7 +131,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {
return true, nil

View File

@ -27,7 +27,7 @@ import (
"k8s.io/api/extensions/v1beta1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/testserver"
apiextensionstestserver "k8s.io/apiextensions-apiserver/test/integration/fixtures"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -104,7 +104,7 @@ func getPodTemplateSpec(labels map[string]string) v1.PodTemplateSpec {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@ -128,10 +128,6 @@ func newOwnerDeployment(f *framework.Framework, deploymentName string, labels ma
}
}
func getSelector() map[string]string {
return map[string]string{"app": "gc-test"}
}
func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[string]string) *v1.ReplicationController {
template := getPodTemplateSpec(labels)
return &v1.ReplicationController{
@ -151,45 +147,6 @@ func newOwnerRC(f *framework.Framework, name string, replicas int32, labels map[
}
}
// verifyRemainingDeploymentsReplicaSetsPods verifies if the number
// of the remaining deployments, replica set and pods are deploymentNum,
// rsNum and podNum. It returns error if the communication with the API
// server fails.
func verifyRemainingDeploymentsReplicaSetsPods(
f *framework.Framework,
clientSet clientset.Interface,
deployment *v1beta1.Deployment,
deploymentNum, rsNum, podNum int,
) (bool, error) {
var ret = true
rs, err := clientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
if len(rs.Items) != rsNum {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
}
deployments, err := clientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list deployments: %v", err)
}
if len(deployments.Items) != deploymentNum {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", deploymentNum, len(deployments.Items)))
}
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %v Pods, got %d Pods", podNum, len(pods.Items)))
}
return ret, nil
}
func newGCPod(name string) *v1.Pod {
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -204,69 +161,77 @@ func newGCPod(name string) *v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
}
}
// verifyRemainingReplicationControllersPods verifies if the number of the remaining replication
// controllers and pods are rcNum and podNum. It returns error if the
// communication with the API server fails.
func verifyRemainingReplicationControllersPods(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
}
rcs, err := rcClient.List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
}
return ret, nil
}
// verifyRemainingCronJobsJobsPods verifies if the number of remaining cronjobs,
// jobs and pods. It returns error if the communication with the API server fails.
func verifyRemainingCronJobsJobsPods(f *framework.Framework, clientSet clientset.Interface,
cjNum, jobNum, podNum int) (bool, error) {
// verifyRemainingObjects verifies that the number of remaining objects of each listed kind matches the expected count.
// It returns an error if the communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (bool, error) {
var ret = true
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != cjNum {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", cjNum, len(cronJobs.Items)))
}
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list jobs: %v", err)
}
if len(jobs.Items) != jobNum {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", jobNum, len(jobs.Items)))
}
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
for object, num := range objects {
switch object {
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
if len(pods.Items) != num {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
}
case "Deployments":
deployments, err := f.ClientSet.ExtensionsV1beta1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
if len(deployments.Items) != num {
ret = false
By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
}
case "ReplicaSets":
rs, err := f.ClientSet.ExtensionsV1beta1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
if len(rs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
}
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
}
if len(rcs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
}
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
}
if len(cronJobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
}
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
if len(jobs.Items) != num {
ret = false
By(fmt.Sprintf("expected %d jobs, got %d jobs", num, len(jobs.Items)))
}
default:
return false, fmt.Errorf("object %s is not supported", object)
}
}
return ret, nil
@ -312,7 +277,7 @@ func newCronJob(name, schedule string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "300"},
},
},
@ -336,10 +301,9 @@ var _ = SIGDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
/*
Testname: garbage-collector-delete-rc--propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a ReplicationController should cause pods created
by that RC to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy background
Description: Create a replication controller with 2 Pods. Once RC is created and the first Pod is created, delete RC with deleteOptions.PropagationPolicy set to Background. Deleting the Replication Controller MUST cause pods created by that RC to be deleted.
*/
framework.ConformanceIt("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
@ -380,7 +344,8 @@ var _ = SIGDescribe("Garbage collector", func() {
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingReplicationControllersPods(f, clientSet, 0, 0)
objects := map[string]int{"ReplicationControllers": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
@ -394,10 +359,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc--propagation-orphan
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a ReplicationController should cause pods created
by that RC to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete replication controller, propagation policy orphan
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once the RC and all Pods are created, delete the RC with deleteOptions.PropagationPolicy set to Orphan. Deleting the Replication Controller MUST cause pods created by that RC to be orphaned.
*/
framework.ConformanceIt("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
@ -463,6 +427,8 @@ var _ = SIGDescribe("Garbage collector", func() {
gatherMetrics(f)
})
// deleteOptions.OrphanDependents is deprecated in 1.7; PropagationPolicy should be used instead.
// Discussion is tracked under https://github.com/kubernetes/kubernetes/issues/65427 to promote this test to conformance in the future.
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.CoreV1().ReplicationControllers(f.Namespace.Name)
@ -508,10 +474,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-background
Description: Ensure that if deleteOptions.PropagationPolicy is set to Background,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to also be deleted.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy background
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Background. Deleting the deployment MUST delete the replicaset it created, and the Pods that belong to the deployment MUST also be deleted.
*/
framework.ConformanceIt("should delete RS created by deployment when not orphaning", func() {
clientSet := f.ClientSet
@ -547,7 +512,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 0, 0)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
errList := make([]error, 0)
@ -567,10 +533,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-deployment-propagation-true
Description: Ensure that if deleteOptions.PropagationPolicy is set to Orphan,
then deleting a Deployment should cause ReplicaSets created
by that Deployment to be orphaned.
Release : v1.9
Testname: Garbage Collector, delete deployment, propagation policy orphan
Description: Create a deployment with a replicaset. Once the replicaset is created, delete the deployment with deleteOptions.PropagationPolicy set to Orphan. Deleting the deployment MUST cause the replicaset created by the deployment to be orphaned, and the Pods created by the deployment MUST also be orphaned.
*/
framework.ConformanceIt("should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan", func() {
clientSet := f.ClientSet
@ -606,7 +571,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the rs")
time.Sleep(30 * time.Second)
ok, err := verifyRemainingDeploymentsReplicaSetsPods(f, clientSet, deployment, 0, 1, 2)
objects := map[string]int{"Deployments": 0, "ReplicaSets": 1, "Pods": 2}
ok, err := verifyRemainingObjects(f, objects)
if err != nil {
framework.Failf("Unexpected error while verifying remaining deployments, rs, and pods: %v", err)
}
@ -641,9 +607,9 @@ var _ = SIGDescribe("Garbage collector", func() {
})
/*
Testname: garbage-collector-delete-rc-after-owned-pods
Description: Ensure that if deleteOptions.PropagationPolicy is set to Foreground,
then a ReplicationController should not be deleted until all its dependent pods are deleted.
Release : v1.9
Testname: Garbage Collector, delete replication controller, after owned pods
Description: Create a replication controller with maximum allocatable Pods between 10 and 100 replicas. Once the RC is created and all the Pods are created, delete the RC with deleteOptions.PropagationPolicy set to Foreground. Deleting the Replication Controller MUST cause pods created by that RC to be deleted before the RC is deleted.
*/
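With Foreground propagation the owner is kept around, carrying a deletionTimestamp and the foregroundDeletion finalizer, until the garbage collector has removed its blocking dependents. A minimal sketch of observing that state, assuming rcClient and rcName from the surrounding test:

rc, err := rcClient.Get(rcName, metav1.GetOptions{})
if err == nil && rc.DeletionTimestamp != nil {
	for _, finalizer := range rc.Finalizers {
		if finalizer == metav1.FinalizerDeleteDependents { // "foregroundDeletion"
			framework.Logf("rc %s is still waiting for its dependents to be deleted", rcName)
		}
	}
}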
framework.ConformanceIt("should keep the rc around until all its pods are deleted if the deleteOptions says so", func() {
clientSet := f.ClientSet
@ -729,9 +695,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: this should be an integration test
/*
Testname: garbage-collector-multiple-owners
Description: Ensure that if a Pod has multiple valid owners, it will not be deleted
when one of of those owners gets deleted.
Release : v1.9
Testname: Garbage Collector, multiple owners
Description: Create a replication controller RC1, with maximum allocatable Pods between 10 and 100 replicas. Create a second replication controller RC2 and set RC2 as an owner for half of those replicas. Once RC1 is created and all the Pods are created, delete RC1 with deleteOptions.PropagationPolicy set to Foreground. The half of the Pods that have RC2 as an owner MUST NOT be deleted but MUST have a deletion timestamp. Deleting the Replication Controller MUST NOT delete Pods that are owned by multiple replication controllers.
*/
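The test below adds the second owner with a raw strategic merge patch; the equivalent typed construction, shown here as a sketch with assumed pod, rc2 and podClient variables, makes the ownerReferences shape easier to see:

ref := metav1.OwnerReference{
	APIVersion: "v1",
	Kind:       "ReplicationController",
	Name:       rc2.Name,
	UID:        rc2.UID,
}
pod.OwnerReferences = append(pod.OwnerReferences, ref)
// Updating the pod persists the extra owner; a dependent is only collected once all of its owners are gone.
_, err := podClient.Update(pod)
framework.ExpectNoError(err, "failed to add second owner reference to pod %s", pod.Name)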
framework.ConformanceIt("should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted", func() {
clientSet := f.ClientSet
@ -771,12 +737,12 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", f.Namespace.Name)
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
for i := 0; i < halfReplicas; i++ {
pod := pods.Items[i]
_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
}
By(fmt.Sprintf("delete the rc %s", rc1Name))
@ -843,40 +809,46 @@ var _ = SIGDescribe("Garbage collector", func() {
// TODO: should be an integration test
/*
Testname: garbage-collector-dependency-cycle
Description: Ensure that a dependency cycle will
not block the garbage collector.
Release : v1.9
Testname: Garbage Collector, dependency cycle
Description: Create three pods and patch them with owner references such that pod1 has pod3, pod2 has pod1, and pod3 has pod2 as an owner reference, respectively. Deleting pod1 MUST delete all three pods. The dependency cycle MUST NOT block the garbage collection.
*/
framework.ConformanceIt("should not be blocked by dependency circle", func() {
clientSet := f.ClientSet
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
pod1 := newGCPod("pod1")
pod1Name := "pod1"
pod1 := newGCPod(pod1Name)
pod1, err := podClient.Create(pod1)
Expect(err).NotTo(HaveOccurred())
pod2 := newGCPod("pod2")
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
pod2Name := "pod2"
pod2 := newGCPod(pod2Name)
pod2, err = podClient.Create(pod2)
Expect(err).NotTo(HaveOccurred())
pod3 := newGCPod("pod3")
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
pod3Name := "pod3"
pod3 := newGCPod(pod3Name)
pod3, err = podClient.Create(pod3)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
// create circular dependency
addRefPatch := func(name string, uid types.UID) []byte {
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
}
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, addRefPatch(pod3.Name, pod3.UID))
Expect(err).NotTo(HaveOccurred())
patch1 := addRefPatch(pod3.Name, pod3.UID)
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, addRefPatch(pod1.Name, pod1.UID))
Expect(err).NotTo(HaveOccurred())
patch2 := addRefPatch(pod1.Name, pod1.UID)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, addRefPatch(pod2.Name, pod2.UID))
Expect(err).NotTo(HaveOccurred())
patch3 := addRefPatch(pod2.Name, pod2.UID)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
// delete one pod, should result in the deletion of all pods
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
var pods *v1.PodList
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
@ -939,7 +911,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@ -964,7 +936,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedDependent, err := resourceClient.Create(dependent)
persistedDependent, err := resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@ -1040,7 +1012,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
persistedOwner, err := resourceClient.Create(owner)
persistedOwner, err := resourceClient.Create(owner, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create owner resource %q: %v", ownerName, err)
}
@ -1065,7 +1037,7 @@ var _ = SIGDescribe("Garbage collector", func() {
},
},
}
_, err = resourceClient.Create(dependent)
_, err = resourceClient.Create(dependent, metav1.CreateOptions{})
if err != nil {
framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
}
@ -1107,7 +1079,7 @@ var _ = SIGDescribe("Garbage collector", func() {
By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?")
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
By("Wait for the CronJob to create new Job")
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
@ -1127,7 +1099,8 @@ var _ = SIGDescribe("Garbage collector", func() {
}
By("Verify if cronjob does not leave jobs nor pods behind")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingCronJobsJobsPods(f, f.ClientSet, 0, 0, 0)
objects := map[string]int{"CronJobs": 0, "Jobs": 0, "Pods": 0}
return verifyRemainingObjects(f, objects)
})
if err != nil {
framework.Failf("Failed to wait for all jobs and pods to be deleted: %v", err)

View File

@ -49,7 +49,7 @@ func stagingClientPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
@ -70,7 +70,7 @@ func testingPod(name, value string) v1.Pod {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -243,7 +243,7 @@ func newTestingCronJob(name string, value string) *batchv1beta1.CronJob {
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",

View File

@ -52,8 +52,9 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
ch := make(chan struct{})
go func() {
_, err := c.CoreV1().Pods(ns).Create(newUninitializedPod(podName))
Expect(err).NotTo(HaveOccurred())
pod := newUninitializedPod(podName)
_, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
close(ch)
}()
@ -72,34 +73,35 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
// verify that we can update an initializing pod
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
pod.Annotations = map[string]string{"update-1": "test"}
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)
// verify the list call filters out uninitialized pods
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{IncludeUninitialized: true})
Expect(err).NotTo(HaveOccurred())
listOptions := metav1.ListOptions{IncludeUninitialized: true}
pods, err := c.CoreV1().Pods(ns).List(listOptions)
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, given list options: %+v", ns, listOptions)
Expect(pods.Items).To(HaveLen(1))
pods, err = c.CoreV1().Pods(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns)
Expect(pods.Items).To(HaveLen(0))
// clear initializers
pod.Initializers = nil
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)
// pod should now start running
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, pod.Namespace)
// ensure create call returns
<-ch
// verify that we cannot start the pod initializing again
pod, err = c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
pod.Initializers = &metav1.Initializers{
Pending: []metav1.Initializer{{Name: "Other"}},
}
@ -119,7 +121,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
// create and register an initializer
initializerName := "pod.test.e2e.kubernetes.io"
initializerConfigName := "e2e-test-initializer"
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
initializerConfig := &v1alpha1.InitializerConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
Initializers: []v1alpha1.Initializer{
{
@ -129,11 +131,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
},
},
},
})
}
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(initializerConfig)
if errors.IsNotFound(err) {
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
}
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create and register initializer with config: %+v", initializerConfig)
// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
defer cleanupInitializer(c, initializerConfigName, initializerName)
@ -145,8 +148,9 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
ch := make(chan struct{})
go func() {
defer close(ch)
_, err := c.CoreV1().Pods(ns).Create(newInitPod(podName))
Expect(err).NotTo(HaveOccurred())
pod := newInitPod(podName)
_, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
}()
// wait until the pod shows up uninitialized
@ -162,7 +166,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s from namespace: %s", podName, ns)
Expect(pod.Initializers).NotTo(BeNil())
Expect(pod.Initializers.Pending).To(HaveLen(1))
Expect(pod.Initializers.Pending[0].Name).To(Equal(initializerName))
@ -171,14 +175,14 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
By("Completing initialization")
pod.Initializers = nil
pod, err = c.CoreV1().Pods(ns).Update(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update pod %s in namespace %s to: %+v", pod.Name, ns, pod)
// ensure create call returns
<-ch
// pod should now start running
err = framework.WaitForPodRunningInNamespace(c, pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, pod.Namespace)
// bypass initialization by explicitly passing an empty pending list
By("Setting an empty initializer as an admin to bypass initialization")
@ -186,7 +190,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
pod = newUninitializedPod(podName)
pod.Initializers.Pending = nil
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
Expect(pod.Initializers).To(BeNil())
// bypass initialization for mirror pods
@ -198,7 +202,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
}
pod.Spec.NodeName = "node-does-not-yet-exist"
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
Expect(pod.Initializers).To(BeNil())
Expect(pod.Annotations[v1.MirrorPodAnnotationKey]).To(Equal("true"))
})
@ -213,7 +217,7 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
// create and register an initializer, without setting up a controller to handle it.
initializerName := "pod.test.e2e.kubernetes.io"
initializerConfigName := "e2e-test-initializer"
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(&v1alpha1.InitializerConfiguration{
initializerConfig := &v1alpha1.InitializerConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: initializerConfigName},
Initializers: []v1alpha1.Initializer{
{
@ -223,11 +227,12 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
},
},
},
})
}
_, err := c.AdmissionregistrationV1alpha1().InitializerConfigurations().Create(initializerConfig)
if errors.IsNotFound(err) {
framework.Skipf("dynamic configuration of initializers requires the alpha admissionregistration.k8s.io group to be enabled")
}
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create and register initializer with config: %+v", initializerConfig)
// we must remove the initializer when the test is complete and ensure no pods are pending for that initializer
defer cleanupInitializer(c, initializerConfigName, initializerName)
@ -236,31 +241,32 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
time.Sleep(3 * time.Second)
// create a replicaset
persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(newReplicaset())
Expect(err).NotTo(HaveOccurred())
rs := newReplicaset()
persistedRS, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(rs)
Expect(err).NotTo(HaveOccurred(), "failed to create replicaset %s in namespace: %s", persistedRS.Name, ns)
// wait for replicaset controller to confirm that it has handled the creation
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "replicaset %s failed to observe generation: %d", persistedRS.Name, persistedRS.Generation)
// update the replicaset spec to trigger a resync
patch := []byte(`{"spec":{"minReadySeconds":5}}`)
persistedRS, err = c.ExtensionsV1beta1().ReplicaSets(ns).Patch(persistedRS.Name, types.StrategicMergePatchType, patch)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to apply to replicaset %s in namespace %s a strategic merge patch: %s", persistedRS.Name, ns, patch)
// wait for replicaset controller to confirm that it has handled the spec update
err = waitForRSObservedGeneration(c, persistedRS.Namespace, persistedRS.Name, persistedRS.Generation)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "replicaset %s failed to observe generation: %d", persistedRS.Name, persistedRS.Generation)
// verify that the replicaset controller doesn't create an extra pod
selector, err := metav1.LabelSelectorAsSelector(persistedRS.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to convert label selector %+v of LabelSelector api type into a struct that implements labels.Selector", persistedRS.Spec.Selector)
listOptions := metav1.ListOptions{
LabelSelector: selector.String(),
IncludeUninitialized: true,
}
pods, err := c.CoreV1().Pods(ns).List(listOptions)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s, given list options: %+v", ns, listOptions)
Expect(len(pods.Items)).Should(Equal(1))
})
@ -277,13 +283,13 @@ var _ = SIGDescribe("Initializers [Feature:Initializers]", func() {
framework.Failf("expect err to be timeout error, got %v", err)
}
uninitializedPod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
Expect(uninitializedPod.Initializers).NotTo(BeNil())
Expect(len(uninitializedPod.Initializers.Pending)).Should(Equal(1))
patch := fmt.Sprintf(`{"metadata":{"initializers":{"pending":[{"$patch":"delete","name":"%s"}]}}}`, uninitializedPod.Initializers.Pending[0].Name)
patchedPod, err := c.CoreV1().Pods(ns).Patch(uninitializedPod.Name, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to apply to pod %s in namespace %s a strategic merge patch: %s", uninitializedPod.Name, ns, patch)
Expect(patchedPod.Initializers).To(BeNil())
})
})

View File

@ -45,8 +45,9 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
go func(n int) {
defer wg.Done()
defer GinkgoRecover()
_, err = f.CreateNamespace(fmt.Sprintf("nslifetest-%v", n), nil)
Expect(err).NotTo(HaveOccurred())
ns := fmt.Sprintf("nslifetest-%v", n)
_, err = f.CreateNamespace(ns, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", ns)
}(n)
}
wg.Wait()
@ -54,8 +55,9 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
//Wait 10 seconds, then SEND delete requests for all the namespaces.
By("Waiting 10 seconds")
time.Sleep(time.Duration(10 * time.Second))
deleted, err := framework.DeleteNamespaces(f.ClientSet, []string{"nslifetest"}, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred())
deleteFilter := []string{"nslifetest"}
deleted, err := framework.DeleteNamespaces(f.ClientSet, deleteFilter, nil /* skipFilter */)
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace(s) containing: %s", deleteFilter)
Expect(len(deleted)).To(Equal(totalNS))
By("Waiting for namespaces to vanish")
@ -93,23 +95,25 @@ func waitForPodInNamespace(c clientset.Interface, ns, podName string) *v1.Pod {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in namespace: %s", podName, ns)
return pod
}
func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
By("Creating a test namespace")
namespace, err := f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
By("Creating a pod in the namespace")
podName := "test-pod"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@ -121,7 +125,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, namespace.Name)
By("Waiting for the pod to have running status")
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
@ -150,7 +154,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
@ -164,26 +168,27 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}))
By("Recreating the namespace")
namespace, err = f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespace, err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(podB.Name, metav1.GetOptions{IncludeUninitialized: true})
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to get pod %s in namespace: %s", podB.Name, namespace.Name)
}
func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
var err error
By("Creating a test namespace")
namespace, err := f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespaceName := "nsdeletetest"
namespace, err := f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
By("Waiting for a default service account to be provisioned in namespace")
err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failure while waiting for a default service account to be provisioned in namespace: %s", namespace.Name)
By("Creating a service in the namespace")
serviceName := "test-service"
@ -204,11 +209,11 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create service %s in namespace %s", serviceName, namespace.Name)
By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete namespace: %s", namespace.Name)
By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
@ -222,12 +227,12 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}))
By("Recreating the namespace")
namespace, err = f.CreateNamespace("nsdeletetest", nil)
Expect(err).NotTo(HaveOccurred())
namespace, err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName)
By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to get service %s in namespace: %s", service.Name, namespace.Name)
}
// This test must run [Serial] due to the impact of running other parallel
@ -262,10 +267,18 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
f := framework.NewDefaultFramework("namespaces")
It("should ensure that all pods are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-pods
Description: Ensure that if a namespace is deleted then all pods are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all pods are removed when a namespace is deleted",
func() { ensurePodsAreRemovedWhenNamespaceIsDeleted(f) })
It("should ensure that all services are removed when a namespace is deleted.",
/*
Testname: namespace-deletion-removes-services
Description: Ensure that if a namespace is deleted then all services are removed from that namespace.
*/
framework.ConformanceIt("should ensure that all services are removed when a namespace is deleted",
func() { ensureServicesAreRemovedWhenNamespaceIsDeleted(f) })
It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",

View File

@ -18,6 +18,7 @@ package apimachinery
import (
"bytes"
"context"
"fmt"
"text/tabwriter"
@ -31,8 +32,8 @@ import (
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
"k8s.io/client-go/util/workqueue"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/pkg/printers"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -54,11 +55,11 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
framework.Logf("Creating pod %s", podName)
_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
table := &metav1beta1.Table{}
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
framework.Logf("Table: %#v", table)
Expect(len(table.ColumnDefinitions)).To(BeNumerically(">", 2))
@ -79,7 +80,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
client := c.CoreV1().PodTemplates(ns)
By("creating a large number of resources")
workqueue.Parallelize(5, 20, func(i int) {
workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
@ -106,11 +107,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Limit: 2}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred())
// TODO: kops PR job is still using etcd2, which prevents this feature from working. Remove this check when kops is upgraded to etcd3
if len(pagedTable.Rows) > 2 {
framework.Skipf("ERROR: This cluster does not support chunking, which means it is running etcd2 and not supported.")
}
Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
Expect(len(pagedTable.Rows)).To(Equal(2))
Expect(pagedTable.ResourceVersion).ToNot(Equal(""))
Expect(pagedTable.SelfLink).ToNot(Equal(""))
@ -122,7 +119,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
VersionedParams(&metav1.ListOptions{Continue: pagedTable.Continue}, metav1.ParameterCodec).
SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").
Do().Into(pagedTable)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get pod templates in Table form in namespace: %s", ns)
Expect(len(pagedTable.Rows)).To(BeNumerically(">", 0))
Expect(pagedTable.Rows[0].Cells[0]).To(Equal("template-0002"))
})
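The same Limit/Continue chunking that the Table requests above exercise can also be driven through the typed client; a minimal sketch, assuming c, ns, metav1 and framework from the surrounding test:

opts := metav1.ListOptions{Limit: 2}
for {
	list, err := c.CoreV1().PodTemplates(ns).List(opts)
	framework.ExpectNoError(err, "failed to list pod templates in namespace: %s", ns)
	framework.Logf("received %d items, continue token %q", len(list.Items), list.Continue)
	if list.Continue == "" {
		break // no more chunks
	}
	opts.Continue = list.Continue
}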
@ -132,7 +129,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
table := &metav1beta1.Table{}
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get nodes in Table form across all namespaces")
framework.Logf("Table: %#v", table)
Expect(len(table.ColumnDefinitions)).To(BeNumerically(">=", 2))
@ -160,7 +157,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
},
}
err := c.AuthorizationV1().RESTClient().Post().Resource("selfsubjectaccessreviews").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Body(sar).Do().Into(table)
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "failed to return error when posting self subject access review: %+v, to a backend that does not implement metadata", sar)
Expect(err.(errors.APIStatus).Status().Code).To(Equal(int32(406)))
})
})
@ -169,7 +166,7 @@ func printTable(table *metav1beta1.Table) string {
buf := &bytes.Buffer{}
tw := tabwriter.NewWriter(buf, 5, 8, 1, ' ', 0)
err := printers.PrintTable(table, tw, printers.PrintOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to print table: %+v", table)
tw.Flush()
return buf.String()
}

View File

@ -17,6 +17,8 @@ limitations under the License.
package apimachinery
import (
"fmt"
"math/rand"
"time"
"k8s.io/api/core/v1"
@ -55,15 +57,15 @@ var _ = SIGDescribe("Watchers", func() {
By("creating a watch on configmaps with label A")
watchA, err := watchConfigMaps(f, "", multipleWatchersLabelValueA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueA)
By("creating a watch on configmaps with label B")
watchB, err := watchConfigMaps(f, "", multipleWatchersLabelValueB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label: %s", multipleWatchersLabelValueB)
By("creating a watch on configmaps with label A or B")
watchAB, err := watchConfigMaps(f, "", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps with label %s or %s", multipleWatchersLabelValueA, multipleWatchersLabelValueB)
testConfigMapA := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@ -84,7 +86,7 @@ var _ = SIGDescribe("Watchers", func() {
By("creating a configmap with label A and ensuring the correct watchers observe the notification")
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
expectEvent(watchA, watch.Added, testConfigMapA)
expectEvent(watchAB, watch.Added, testConfigMapA)
expectNoEvent(watchB, watch.Added, testConfigMapA)
@ -93,7 +95,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)
@ -102,28 +104,28 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapA, err = updateConfigMap(c, ns, testConfigMapA.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Modified, testConfigMapA)
expectEvent(watchAB, watch.Modified, testConfigMapA)
expectNoEvent(watchB, watch.Modified, testConfigMapA)
By("deleting configmap A and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)
By("creating a configmap with label B and ensuring the correct watchers observe the notification")
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
expectEvent(watchB, watch.Added, testConfigMapB)
expectEvent(watchAB, watch.Added, testConfigMapB)
expectNoEvent(watchA, watch.Added, testConfigMapB)
By("deleting configmap B and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
expectEvent(watchB, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchA, watch.Deleted, nil)
@ -149,27 +151,27 @@ var _ = SIGDescribe("Watchers", func() {
By("creating a new configmap")
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", testConfigMap.GetName(), ns)
By("modifying the configmap a second time")
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
By("creating a watch on configmaps from the resource version returned by the first update")
testWatch, err := watchConfigMaps(f, testConfigMapFirstUpdate.ObjectMeta.ResourceVersion, fromResourceVersionLabelValue)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmaps from the resource version %s returned by the first update", testConfigMapFirstUpdate.ObjectMeta.ResourceVersion)
By("Expecting to observe notifications for all changes to the configmap after the first update")
expectEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@ -186,9 +188,10 @@ var _ = SIGDescribe("Watchers", func() {
c := f.ClientSet
ns := f.Namespace.Name
configMapName := "e2e-watch-test-watch-closed"
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-watch-closed",
Name: configMapName,
Labels: map[string]string{
watchConfigMapLabelKey: watchRestartedLabelValue,
},
@ -197,17 +200,17 @@ var _ = SIGDescribe("Watchers", func() {
By("creating a watch on configmaps")
testWatchBroken, err := watchConfigMaps(f, "", watchRestartedLabelValue)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
By("modifying the configmap once")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
By("closing the watch once it receives two notifications")
expectEvent(testWatchBroken, watch.Added, testConfigMap)
@ -221,7 +224,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
By("creating a new watch on configmaps from the last resource version observed by the first watch")
lastEventConfigMap, ok := lastEvent.Object.(*v1.ConfigMap)
@ -229,11 +232,11 @@ var _ = SIGDescribe("Watchers", func() {
framework.Failf("Expected last notfication to refer to a configmap but got: %v", lastEvent)
}
testWatchRestarted, err := watchConfigMaps(f, lastEventConfigMap.ObjectMeta.ResourceVersion, watchRestartedLabelValue)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
expectEvent(testWatchRestarted, watch.Modified, testConfigMapSecondUpdate)
@ -250,9 +253,10 @@ var _ = SIGDescribe("Watchers", func() {
c := f.ClientSet
ns := f.Namespace.Name
configMapName := "e2e-watch-test-label-changed"
testConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "e2e-watch-test-label-changed",
Name: configMapName,
Labels: map[string]string{
watchConfigMapLabelKey: toBeChangedLabelValue,
},
@ -261,23 +265,23 @@ var _ = SIGDescribe("Watchers", func() {
By("creating a watch on configmaps with a certain label")
testWatch, err := watchConfigMaps(f, "", toBeChangedLabelValue)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configMapName, ns)
By("modifying the configmap once")
testConfigMapFirstUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "1")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace: %s", configMapName, ns)
By("changing the label value of the configmap")
_, err = updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = "wrong-value"
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value", configMapName, ns)
By("Expecting to observe a delete notification for the watched object")
expectEvent(testWatch, watch.Added, testConfigMap)
@ -288,7 +292,7 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapSecondUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "2")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a second time", configMapName, ns)
By("Expecting not to observe a notification because the object no longer meets the selector's requirements")
expectNoEvent(testWatch, watch.Modified, testConfigMapSecondUpdate)
@ -297,23 +301,66 @@ var _ = SIGDescribe("Watchers", func() {
testConfigMapLabelRestored, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
cm.ObjectMeta.Labels[watchConfigMapLabelKey] = toBeChangedLabelValue
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s by changing label value back", configMapName, ns)
By("modifying the configmap a third time")
testConfigMapThirdUpdate, err := updateConfigMap(c, ns, testConfigMap.GetName(), func(cm *v1.ConfigMap) {
setConfigMapData(cm, "mutation", "3")
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to update configmap %s in namespace %s a third time", configMapName, ns)
By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to delete configmap %s in namespace: %s", configMapName, ns)
By("Expecting to observe an add notification for the watched object when the label value was restored")
expectEvent(testWatch, watch.Added, testConfigMapLabelRestored)
expectEvent(testWatch, watch.Modified, testConfigMapThirdUpdate)
expectEvent(testWatch, watch.Deleted, nil)
})
/*
Testname: watch-consistency
Description: Ensure that concurrent watches are consistent with each other: for each event received from the first watch, initiate an additional watch starting at that event's resource version, and check that
every watch observes the same sequence of resource versions. Events are produced by writes on a background goroutine.
*/
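// Invariant under test: all watches over the same set of objects must deliver the same events in the
// same order; the resourceVersion carried by each event is what makes that ordering observable.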
It("should receive events on concurrent watches in same order", func() {
c := f.ClientSet
ns := f.Namespace.Name
iterations := 100
By("starting a background goroutine to produce watch events")
donec := make(chan struct{})
stopc := make(chan struct{})
go func() {
defer GinkgoRecover()
defer close(donec)
produceConfigMapEvents(f, stopc, 5*time.Millisecond)
}()
By("creating watches starting from each resource version of the events produced and verifying they all receive resource versions in the same order")
wcs := []watch.Interface{}
resourceVersion := "0"
for i := 0; i < iterations; i++ {
wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
Expect(err).NotTo(HaveOccurred())
wcs = append(wcs, wc)
resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion
for _, wc := range wcs[1:] {
e := waitForNextConfigMapEvent(wc)
if resourceVersion != e.ResourceVersion {
framework.Failf("resource version mismatch, expected %s but got %s", resourceVersion, e.ResourceVersion)
}
}
}
close(stopc)
for _, wc := range wcs {
wc.Stop()
}
<-donec
})
})
func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...string) (watch.Interface, error) {
@ -381,3 +428,70 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru
}
}
}
func waitForNextConfigMapEvent(watch watch.Interface) *v1.ConfigMap {
select {
case event := <-watch.ResultChan():
if configMap, ok := event.Object.(*v1.ConfigMap); ok {
return configMap
} else {
framework.Failf("expected config map")
}
case <-time.After(10 * time.Second):
framework.Failf("timed out waiting for watch event")
}
return nil // should never happen
}
const (
createEvent = iota
updateEvent
deleteEvent
)
func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWaitBetweenEvents time.Duration) {
c := f.ClientSet
ns := f.Namespace.Name
name := func(i int) string {
return fmt.Sprintf("cm-%d", i)
}
existing := []int{}
tc := time.NewTicker(minWaitBetweenEvents)
defer tc.Stop()
i := 0
for range tc.C {
op := rand.Intn(3)
if len(existing) == 0 {
op = createEvent
}
cm := &v1.ConfigMap{}
switch op {
case createEvent:
cm.Name = name(i)
_, err := c.CoreV1().ConfigMaps(ns).Create(cm)
Expect(err).NotTo(HaveOccurred())
existing = append(existing, i)
i += 1
case updateEvent:
idx := rand.Intn(len(existing))
cm.Name = name(existing[idx])
_, err := c.CoreV1().ConfigMaps(ns).Update(cm)
Expect(err).NotTo(HaveOccurred())
case deleteEvent:
idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
existing = append(existing[:idx], existing[idx+1:]...)
default:
framework.Failf("Unsupported event operation: %d", op)
}
select {
case <-stopc:
return
default:
}
}
}

View File

@ -33,10 +33,10 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -52,27 +52,32 @@ const (
roleBindingName = "webhook-auth-reader"
// The webhook configuration names should not be reused between test instances.
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
webhookForWebhooksConfigName = "e2e-test-webhook-for-webhooks-config"
removableValidatingHookName = "e2e-test-should-be-removable-validating-webhook-config"
removableMutatingHookName = "e2e-test-should-be-removable-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
crWebhookConfigName = "e2e-test-webhook-config-cr"
webhookConfigName = "e2e-test-webhook-config"
attachingPodWebhookConfigName = "e2e-test-webhook-config-attaching-pod"
mutatingWebhookConfigName = "e2e-test-mutating-webhook-config"
podMutatingWebhookConfigName = "e2e-test-mutating-webhook-pod"
crMutatingWebhookConfigName = "e2e-test-mutating-webhook-config-cr"
webhookFailClosedConfigName = "e2e-test-webhook-fail-closed"
validatingWebhookForWebhooksConfigName = "e2e-test-validating-webhook-for-webhooks-config"
mutatingWebhookForWebhooksConfigName = "e2e-test-mutating-webhook-for-webhooks-config"
dummyValidatingWebhookConfigName = "e2e-test-dummy-validating-webhook-config"
dummyMutatingWebhookConfigName = "e2e-test-dummy-mutating-webhook-config"
crdWebhookConfigName = "e2e-test-webhook-config-crd"
skipNamespaceLabelKey = "skip-webhook-admission"
skipNamespaceLabelValue = "yes"
skippedNamespaceName = "exempted-namesapce"
disallowedPodName = "disallowed-pod"
toBeAttachedPodName = "to-be-attached-pod"
hangingPodName = "hanging-pod"
disallowedConfigMapName = "disallowed-configmap"
allowedConfigMapName = "allowed-configmap"
failNamespaceLabelKey = "fail-closed-webhook"
failNamespaceLabelValue = "yes"
failNamespaceName = "fail-closed-namesapce"
addedLabelKey = "added-label"
addedLabelValue = "yes"
)
var serverWebhookVersion = utilversion.MustParseSemantic("v1.8.0")
@ -117,6 +122,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testWebhook(f)
})
It("Should be able to deny attaching pod", func() {
webhookCleanup := registerWebhookForAttachingPod(f, context)
defer webhookCleanup()
testAttachingPodWebhook(f)
})
It("Should be able to deny custom resource creation", func() {
testcrd, err := framework.CreateTestCRD(f)
if err != nil {
@ -125,7 +136,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
defer testcrd.CleanUp()
webhookCleanup := registerWebhookForCustomResource(f, context, testcrd)
defer webhookCleanup()
testCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClient)
testCustomResourceWebhook(f, testcrd.Crd, testcrd.GetV1DynamicClient())
})
It("Should unconditionally reject operations on fail closed webhook", func() {
@ -146,10 +157,12 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
testMutatingPodWebhook(f)
})
It("Should not be able to prevent deleting validating-webhook-configurations or mutating-webhook-configurations", func() {
webhookCleanup := registerWebhookForWebhookConfigurations(f, context)
defer webhookCleanup()
testWebhookForWebhookConfigurations(f)
It("Should not be able to mutate or prevent deletion of webhook configuration objects", func() {
validatingWebhookCleanup := registerValidatingWebhookForWebhookConfigurations(f, context)
defer validatingWebhookCleanup()
mutatingWebhookCleanup := registerMutatingWebhookForWebhookConfigurations(f, context)
defer mutatingWebhookCleanup()
testWebhooksForWebhookConfigurations(f)
})
It("Should mutate custom resource", func() {
@ -160,7 +173,7 @@ var _ = SIGDescribe("AdmissionWebhook", func() {
defer testcrd.CleanUp()
webhookCleanup := registerMutatingWebhookForCustomResource(f, context, testcrd)
defer webhookCleanup()
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.DynamicClient)
testMutatingCustomResourceWebhook(f, testcrd.Crd, testcrd.GetV1DynamicClient())
})
It("Should deny crd creation", func() {
@ -405,6 +418,53 @@ func registerWebhook(f *framework.Framework, context *certContext) func() {
}
}
func registerWebhookForAttachingPod(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the webhook via the AdmissionRegistration API")
namespace := f.Namespace.Name
configName := attachingPodWebhookConfigName
// A webhook that cannot talk to server, with fail-open policy
failOpenHook := failingWebhook(namespace, "fail-open.k8s.io")
policyIgnore := v1beta1.Ignore
failOpenHook.FailurePolicy = &policyIgnore
_, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "deny-attaching-pod.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Connect},
Rule: v1beta1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods/attach"},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/pods/attach"),
},
CABundle: context.signingCert,
},
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
// The webhook configuration is honored in 10s.
time.Sleep(10 * time.Second)
return func() {
client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(configName, nil)
}
}
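The webhook backend serving /pods/attach is not part of this diff; as an assumption about its behaviour, denying the CONNECT request would come back as an AdmissionReview response shaped roughly like the sketch below, where admissionv1beta1 stands for k8s.io/api/admission/v1beta1 and metav1 is assumed from the surrounding file. The message matches the string the test later expects from 'kubectl attach'.

review := admissionv1beta1.AdmissionReview{
	Response: &admissionv1beta1.AdmissionResponse{
		Allowed: false,
		Result: &metav1.Status{
			Message: "attaching to pod 'to-be-attached-pod' is not allowed",
		},
	},
}
_ = review // the server would JSON-encode this into its HTTP response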
func registerMutatingWebhookForConfigMap(f *framework.Framework, context *certContext) func() {
client := f.ClientSet
By("Registering the mutating configmap webhook via the AdmissionRegistration API")
@ -560,7 +620,7 @@ func testWebhook(f *framework.Framework) {
// Creating the pod, the request should be rejected
pod := nonCompliantPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(BeNil())
Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
expectedErrMsg1 := "the pod contains unwanted container name"
if !strings.Contains(err.Error(), expectedErrMsg1) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg1, err.Error())
@ -575,8 +635,8 @@ func testWebhook(f *framework.Framework) {
// Creating the pod, the request should be rejected
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(BeNil())
expectedTimeoutErr := "request did not complete within allowed duration"
Expect(err).To(HaveOccurred(), "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
expectedTimeoutErr := "request did not complete within"
if !strings.Contains(err.Error(), expectedTimeoutErr) {
framework.Failf("expect timeout error %q, got %q", expectedTimeoutErr, err.Error())
}
@ -585,7 +645,7 @@ func testWebhook(f *framework.Framework) {
// Creating the configmap, the request should be rejected
configmap := nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
Expect(err).NotTo(BeNil())
Expect(err).To(HaveOccurred(), "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
expectedErrMsg := "the configmap contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
@ -602,7 +662,7 @@ func testWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
toNonCompliantFn := func(cm *v1.ConfigMap) {
@ -612,7 +672,7 @@ func testWebhook(f *framework.Framework) {
cm.Data["webhook-e2e-test"] = "webhook-disallow"
}
_, err = updateConfigMap(client, f.Namespace.Name, allowedConfigMapName, toNonCompliantFn)
Expect(err).NotTo(BeNil())
Expect(err).To(HaveOccurred(), "update (PUT) admitted configmap %s in namespace %s to a non-compliant one should be rejected by webhook", allowedConfigMapName, f.Namespace.Name)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
@ -620,7 +680,7 @@ func testWebhook(f *framework.Framework) {
By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
patch := nonCompliantConfigMapPatch()
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
Expect(err).NotTo(BeNil())
Expect(err).To(HaveOccurred(), "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
}
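// Editor's note: illustrative assumption, not part of this change. nonCompliantConfigMapPatch
// is defined elsewhere in this file; since the webhook rejects configmaps carrying the
// "webhook-e2e-test": "webhook-disallow" entry, the strategic merge patch it returns is
// presumably equivalent to
//
//	{"data":{"webhook-e2e-test":"webhook-disallow"}}
//
// which re-introduces the disallowed key and is therefore expected to fail with the same
// "unwanted key and value" message checked above.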
@ -639,7 +699,26 @@ func testWebhook(f *framework.Framework) {
By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
Expect(err).To(BeNil())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}
func testAttachingPodWebhook(f *framework.Framework) {
By("create a pod")
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
err = framework.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred(), "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)
By("'kubectl attach' the pod, should be denied by the webhook")
timer := time.NewTimer(30 * time.Second)
defer timer.Stop()
_, err = framework.NewKubectlCommand("attach", fmt.Sprintf("--namespace=%v", f.Namespace.Name), pod.Name, "-i", "-c=container1").WithTimeout(timer.C).Exec()
Expect(err).To(HaveOccurred(), "'kubectl attach' the pod, should be denied by the webhook")
if e, a := "attaching to pod 'to-be-attached-pod' is not allowed", err.Error(); !strings.Contains(a, e) {
framework.Failf("unexpected 'kubectl attach' error message. expected to contain %q, got %q", e, a)
}
}
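// Editor's note: illustrative sketch only, not part of this change. The admission webhook
// server that denies the attach request lives in a separate webhook image, outside this test
// file. Assuming it uses the k8s.io/api/admission/v1beta1 types (plus fmt and
// k8s.io/apimachinery/pkg/apis/meta/v1 for the status message), a hypothetical handler that
// produces the denial asserted above could look roughly like this:
func exampleDenyAttachHandler(ar admissionv1beta1.AdmissionReview) *admissionv1beta1.AdmissionResponse {
	// Only reject requests against the pods/attach subresource; everything else is allowed.
	if ar.Request == nil || ar.Request.SubResource != "attach" {
		return &admissionv1beta1.AdmissionResponse{Allowed: true}
	}
	return &admissionv1beta1.AdmissionResponse{
		Allowed: false,
		Result: &metav1.Status{
			Message: fmt.Sprintf("attaching to pod '%s' is not allowed", ar.Request.Name),
		},
	}
}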
// failingWebhook returns a webhook with rule of create configmaps,
@ -725,22 +804,24 @@ func testFailClosedWebhook(f *framework.Framework) {
},
}
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
Expect(err).To(HaveOccurred())
Expect(err).To(HaveOccurred(), "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
if !errors.IsInternalError(err) {
framework.Failf("expect an internal error, got %#v", err)
}
}
func registerWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
var err error
client := f.ClientSet
By("Registering a webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
By("Registering a validating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
namespace := f.Namespace.Name
configName := webhookForWebhooksConfigName
configName := validatingWebhookForWebhooksConfigName
failurePolicy := v1beta1.Fail
// This webhook will deny all requests to Delete admissionregistration objects
// This webhook denies all requests to Delete validating webhook configuration and
// mutating webhook configuration objects. It should never be called, however, because
// dynamic admission webhooks should not be called on requests involving webhook configuration objects.
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
@ -771,7 +852,6 @@ func registerWebhookForWebhookConfigurations(f *framework.Framework, context *ce
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
// The webhook configuration is honored in 10s.
@ -782,23 +862,76 @@ func registerWebhookForWebhookConfigurations(f *framework.Framework, context *ce
}
}
// This test assumes that the deletion-rejecting webhook defined in
// registerWebhookForWebhookConfigurations is in place.
func testWebhookForWebhookConfigurations(f *framework.Framework) {
func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, context *certContext) func() {
var err error
client := f.ClientSet
By("Creating a validating-webhook-configuration object")
By("Registering a mutating webhook on ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects, via the AdmissionRegistration API")
namespace := f.Namespace.Name
configName := mutatingWebhookForWebhooksConfigName
failurePolicy := v1beta1.Fail
// This webhook adds a label to all create requests for validating webhook configuration and
// mutating webhook configuration objects. It should never be called, however, because
// dynamic admission webhooks should not be called on requests involving webhook configuration objects.
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: configName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "add-label-to-webhook-configurations.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
APIGroups: []string{"admissionregistration.k8s.io"},
APIVersions: []string{"*"},
Resources: []string{
"validatingwebhookconfigurations",
"mutatingwebhookconfigurations",
},
},
}},
ClientConfig: v1beta1.WebhookClientConfig{
Service: &v1beta1.ServiceReference{
Namespace: namespace,
Name: serviceName,
Path: strPtr("/add-label"),
},
CABundle: context.signingCert,
},
FailurePolicy: &failurePolicy,
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", configName, namespace)
// The webhook configuration is honored in 10s.
time.Sleep(10 * time.Second)
return func() {
err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(configName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
}
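// Editor's note: illustrative sketch only, not part of this change. The "/add-label" path
// registered above is served by the external webhook image; the constants addedLabelKey and
// addedLabelValue used by the assertions below are defined elsewhere in this file, so the
// literal "added-label"/"yes" pair here is only a placeholder. Assuming the
// k8s.io/api/admission/v1beta1 types, a hypothetical mutating handler could look roughly like this:
func exampleAddLabelHandler(ar admissionv1beta1.AdmissionReview) *admissionv1beta1.AdmissionResponse {
	// JSON patch that adds a single label. The test asserts this label never appears on
	// webhook configuration objects, because dynamic admission is not invoked for them.
	patch := `[{"op":"add","path":"/metadata/labels","value":{"added-label":"yes"}}]`
	pt := admissionv1beta1.PatchTypeJSONPatch
	return &admissionv1beta1.AdmissionResponse{
		Allowed:   true,
		Patch:     []byte(patch),
		PatchType: &pt,
	}
}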
// This test assumes that the deletion-rejecting webhook defined in
// registerValidatingWebhookForWebhookConfigurations and the webhook-config-mutating
// webhook defined in registerMutatingWebhookForWebhookConfigurations already exist.
func testWebhooksForWebhookConfigurations(f *framework.Framework) {
var err error
client := f.ClientSet
By("Creating a dummy validating-webhook-configuration object")
namespace := f.Namespace.Name
failurePolicy := v1beta1.Ignore
_, err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
mutatedValidatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Create(&v1beta1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: removableValidatingHookName,
Name: dummyValidatingWebhookConfigName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "should-be-removable-validating-webhook.k8s.io",
Name: "dummy-validating-webhook.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
// This will not match any real resources so this webhook should never be called.
@ -824,25 +957,28 @@ func testWebhookForWebhookConfigurations(f *framework.Framework) {
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableValidatingHookName, namespace)
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace)
if mutatedValidatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedValidatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyValidatingWebhookConfigName)
}
// The webhook configuration is honored in 10s.
time.Sleep(10 * time.Second)
By("Deleting the validating-webhook-configuration, which should be possible to remove")
err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(removableValidatingHookName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableValidatingHookName, namespace)
err = client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(dummyValidatingWebhookConfigName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyValidatingWebhookConfigName, namespace)
By("Creating a mutating-webhook-configuration object")
By("Creating a dummy mutating-webhook-configuration object")
_, err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
mutatedMutatingWebhookConfiguration, err := client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(&v1beta1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: removableMutatingHookName,
Name: dummyMutatingWebhookConfigName,
},
Webhooks: []v1beta1.Webhook{
{
Name: "should-be-removable-mutating-webhook.k8s.io",
Name: "dummy-mutating-webhook.k8s.io",
Rules: []v1beta1.RuleWithOperations{{
Operations: []v1beta1.OperationType{v1beta1.Create},
// This will not match any real resources so this webhook should never be called.
@ -868,15 +1004,18 @@ func testWebhookForWebhookConfigurations(f *framework.Framework) {
},
},
})
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", removableMutatingHookName, namespace)
framework.ExpectNoError(err, "registering webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace)
if mutatedMutatingWebhookConfiguration.ObjectMeta.Labels != nil && mutatedMutatingWebhookConfiguration.ObjectMeta.Labels[addedLabelKey] == addedLabelValue {
framework.Failf("expected %s not to be mutated by mutating webhooks but it was", dummyMutatingWebhookConfigName)
}
// The webhook configuration is honored in 10s.
time.Sleep(10 * time.Second)
By("Deleting the mutating-webhook-configuration, which should be possible to remove")
err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(removableMutatingHookName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", removableMutatingHookName, namespace)
err = client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(dummyMutatingWebhookConfigName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", dummyMutatingWebhookConfigName, namespace)
}
func createNamespace(f *framework.Framework, ns *v1.Namespace) error {
@ -930,6 +1069,22 @@ func hangingPod(f *framework.Framework) *v1.Pod {
}
}
func toBeAttachedPod(f *framework.Framework) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: toBeAttachedPodName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
Image: imageutils.GetPauseImageName(),
},
},
},
}
}
func nonCompliantConfigMap(f *framework.Framework) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
@ -1002,7 +1157,7 @@ func registerWebhookForCustomResource(f *framework.Framework, context *certConte
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
APIGroups: []string{testcrd.ApiGroup},
APIVersions: []string{testcrd.ApiVersion},
APIVersions: testcrd.GetAPIVersions(),
Resources: []string{testcrd.GetPluralName()},
},
}},
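// Editor's note: illustrative sketch only, not part of this change. GetAPIVersions is a
// framework helper whose body is not shown in this diff; given that TestCrd now carries a
// Versions slice of apiextensionsv1beta1.CustomResourceDefinitionVersion (see
// testCRDDenyWebhook below), it is assumed to collect the names of the served versions,
// roughly like this hypothetical equivalent:
func exampleGetAPIVersions(versions []apiextensionsv1beta1.CustomResourceDefinitionVersion) []string {
	// Gather every version that the CRD serves; these are the API versions the webhook
	// rules above need to match.
	names := []string{}
	for _, v := range versions {
		if v.Served {
			names = append(names, v.Name)
		}
	}
	return names
}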
@ -1043,7 +1198,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
APIGroups: []string{testcrd.ApiGroup},
APIVersions: []string{testcrd.ApiVersion},
APIVersions: testcrd.GetAPIVersions(),
Resources: []string{testcrd.GetPluralName()},
},
}},
@ -1062,7 +1217,7 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
Operations: []v1beta1.OperationType{v1beta1.Create},
Rule: v1beta1.Rule{
APIGroups: []string{testcrd.ApiGroup},
APIVersions: []string{testcrd.ApiVersion},
APIVersions: testcrd.GetAPIVersions(),
Resources: []string{testcrd.GetPluralName()},
},
}},
@ -1087,12 +1242,13 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, context *c
func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be denied by the webhook")
crInstanceName := "cr-instance-1"
crInstance := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
"metadata": map[string]interface{}{
"name": "cr-instance-1",
"name": crInstanceName,
"namespace": f.Namespace.Name,
},
"data": map[string]interface{}{
@ -1100,8 +1256,8 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
},
},
}
_, err := customResourceClient.Create(crInstance)
Expect(err).NotTo(BeNil())
_, err := customResourceClient.Create(crInstance, metav1.CreateOptions{})
Expect(err).To(HaveOccurred(), "create custom resource %s in namespace %s should be denied by webhook", crInstanceName, f.Namespace.Name)
expectedErrMsg := "the custom resource contains unwanted data"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
@ -1110,12 +1266,13 @@ func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1
func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1beta1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
By("Creating a custom resource that should be mutated by the webhook")
crName := "cr-instance-1"
cr := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": crd.Spec.Names.Kind,
"apiVersion": crd.Spec.Group + "/" + crd.Spec.Version,
"metadata": map[string]interface{}{
"name": "cr-instance-1",
"name": crName,
"namespace": f.Namespace.Name,
},
"data": map[string]interface{}{
@ -1123,8 +1280,8 @@ func testMutatingCustomResourceWebhook(f *framework.Framework, crd *apiextension
},
},
}
mutatedCR, err := customResourceClient.Create(cr)
Expect(err).To(BeNil())
mutatedCR, err := customResourceClient.Create(cr, metav1.CreateOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to create custom resource %s in namespace: %s", crName, f.Namespace.Name)
expectedCRData := map[string]interface{}{
"mutation-start": "yes",
"mutation-stage-1": "yes",
@ -1186,12 +1343,18 @@ func testCRDDenyWebhook(f *framework.Framework) {
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, "deny")
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, "deny")
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
apiVersion := "v1"
apiVersions := []apiextensionsv1beta1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
},
}
testcrd := &framework.TestCrd{
Name: name,
Kind: kind,
ApiGroup: group,
ApiVersion: apiVersion,
Name: name,
Kind: kind,
ApiGroup: group,
Versions: apiVersions,
}
// Creating a custom resource definition for use by assorted tests.
@ -1213,8 +1376,8 @@ func testCRDDenyWebhook(f *framework.Framework) {
},
},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: testcrd.ApiGroup,
Version: testcrd.ApiVersion,
Group: testcrd.ApiGroup,
Versions: testcrd.Versions,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: testcrd.GetPluralName(),
Singular: testcrd.Name,
@ -1227,7 +1390,7 @@ func testCRDDenyWebhook(f *framework.Framework) {
// create CRD
_, err = apiExtensionClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
Expect(err).NotTo(BeNil())
Expect(err).To(HaveOccurred(), "create custom resource definition %s should be denied by webhook", testcrd.GetMetaName())
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())

View File

@ -37,7 +37,28 @@ go_library(
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/util/pointer:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
@ -45,27 +66,7 @@ go_library(
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@ -5,3 +5,5 @@ approvers:
- mfojtik
reviewers:
- sig-apps-reviewers
labels:
- sig/apps

View File

@ -34,6 +34,7 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
@ -179,8 +180,8 @@ var _ = SIGDescribe("CronJob", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring no unexpected event has happened")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).NotTo(HaveOccurred())
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).To(HaveOccurred())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@ -213,13 +214,13 @@ var _ = SIGDescribe("CronJob", func() {
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
By("Ensuring there are no active jobs in the cronjob")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
By("Ensuring the job is not in the cronjob active list")
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
Expect(err).NotTo(HaveOccurred())
By("Ensuring MissingJob event has occurred")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).To(HaveOccurred())
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).NotTo(HaveOccurred())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
@ -298,7 +299,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
Containers: []v1.Container{
{
Name: "c",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
@ -335,7 +336,7 @@ func deleteCronJob(c clientset.Interface, ns, name string) error {
// Wait for at least the given number of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
curr, err := getCronJob(c, ns, cronJobName)
if err != nil {
return false, err
}
@ -349,7 +350,7 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
// empty after the timeout.
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.BatchV1beta1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := getCronJob(c, ns, jobName)
if err != nil {
return false, err
}
@ -362,6 +363,23 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty boo
})
}
// Wait until the given job actually disappears from the Active list of the given cronjob
func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := getCronJob(c, ns, cronJobName)
if err != nil {
return false, err
}
for _, j := range curr.Status.Active {
if j.Name == jobName {
return false, nil
}
}
return true, nil
})
}
// Wait for a job to not exist by listing jobs explicitly.
func waitForJobNotExist(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
@ -425,24 +443,26 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
})
}
// checkNoEventWithReason checks no events with a reason within a list has occurred
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
sj, err := c.BatchV1beta1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
}
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
if err != nil {
return fmt.Errorf("Error in listing events: %s", err)
}
for _, e := range events.Items {
for _, reason := range reasons {
if e.Reason == reason {
return fmt.Errorf("Found event with reason %s: %#v", reason, e)
// waitForEventWithReason waits until an event with one of the given reasons has occurred
func waitForEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
return wait.Poll(framework.Poll, 30*time.Second, func() (bool, error) {
sj, err := getCronJob(c, ns, cronJobName)
if err != nil {
return false, err
}
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, sj)
if err != nil {
return false, err
}
for _, e := range events.Items {
for _, reason := range reasons {
if e.Reason == reason {
return true, nil
}
}
}
}
return nil
return false, nil
})
}
// filterNotDeletedJobs returns the job list without any jobs that are pending

View File

@ -281,7 +281,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)
framework.GetMasterHost(), "kube-scheduler", ports.InsecureSchedulerPort, restartPollInterval, restartTimeout)
// Create pods while the scheduler is down and make sure the scheduler picks them up by
// scaling the rc to the same size.

View File

@ -24,8 +24,7 @@ import (
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
@ -54,6 +53,10 @@ const (
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)
// The annotation key scheduler.alpha.kubernetes.io/node-selector is for assigning
// node selector labels to namespaces
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
@ -100,7 +103,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ns = f.Namespace.Name
c = f.ClientSet
err := clearDaemonSetNodeLabels(c)
updatedNS, err := updateNamespaceAnnotations(c, ns)
Expect(err).NotTo(HaveOccurred())
ns = updatedNS.Name
err = clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
@ -495,6 +504,26 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil
}
// updateNamespaceAnnotations sets node-selector-related annotations on the test namespace to empty
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces()
ns, err := nsClient.Get(nsName, metav1.GetOptions{})
if err != nil {
return nil, err
}
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
for _, n := range NamespaceNodeSelectors {
ns.Annotations[n] = ""
}
return nsClient.Update(ns)
}
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.CoreV1().Nodes()
var newNode *v1.Node
@ -520,7 +549,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
}
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
return false, nil
}
@ -734,7 +763,7 @@ func curHistory(historyList *apps.ControllerRevisionList, ds *apps.DaemonSet) *a
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)

View File

@ -38,9 +38,9 @@ import (
clientset "k8s.io/client-go/kubernetes"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/framework"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
)
const (
@ -70,16 +70,35 @@ var _ = SIGDescribe("Deployment", func() {
It("deployment reaping should cascade to its replica sets and pods", func() {
testDeleteDeployment(f)
})
It("RollingUpdateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment RollingUpdate
Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
*/
framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func() {
testRollingUpdateDeployment(f)
})
It("RecreateDeployment should delete old pods and create new ones", func() {
/*
Testname: Deployment Recreate
Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
*/
framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func() {
testRecreateDeployment(f)
})
It("deployment should delete old replica sets", func() {
/*
Testname: Deployment RevisionHistoryLimit
Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
the Deployment's `.spec.revisionHistoryLimit`.
*/
framework.ConformanceIt("deployment should delete old replica sets", func() {
testDeploymentCleanUpPolicy(f)
})
It("deployment should support rollover", func() {
/*
Testname: Deployment Rollover
Description: A conformant Kubernetes distribution MUST support Deployment rollover,
i.e. allow an arbitrary number of changes to the desired state during a rolling update
before the rollout finishes.
*/
framework.ConformanceIt("deployment should support rollover", func() {
testRolloverDeployment(f)
})
It("deployment should support rollback", func() {
@ -91,7 +110,13 @@ var _ = SIGDescribe("Deployment", func() {
It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
testDeploymentsControllerRef(f)
})
It("deployment should support proportional scaling", func() {
/*
Testname: Deployment Proportional Scaling
Description: A conformant Kubernetes distribution MUST support Deployment
proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
when a Deployment is scaled.
*/
framework.ConformanceIt("deployment should support proportional scaling", func() {
testProportionalScalingDeployment(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues

View File

@ -44,7 +44,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@ -63,7 +63,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
@ -84,7 +84,7 @@ var _ = SIGDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
Expect(err).NotTo(HaveOccurred())
})

View File

@ -106,12 +106,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
f := framework.NewDefaultFramework("network-partition")
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
@ -197,11 +196,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
master := framework.GetMasterAddress(c)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
return
@ -214,7 +216,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
}
}()
framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)
@ -574,11 +578,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
go controller.Run(stopCh)
By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
host := framework.GetNodeExternalIP(&node)
master := framework.GetMasterAddress(c)
host, err := framework.GetNodeExternalIP(&node)
framework.ExpectNoError(err)
masterAddresses := framework.GetAllMasterAddresses(c)
defer func() {
By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
framework.UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.UnblockNetwork(host, masterAddress)
}
if CurrentGinkgoTestDescription().Failed {
return
@ -588,7 +595,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
expectNodeReadiness(true, newNode)
}()
framework.BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
framework.BlockNetwork(host, masterAddress)
}
By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode)

View File

@ -60,11 +60,21 @@ var _ = SIGDescribe("ReplicationController", func() {
testReplicationControllerConditionCheck(f)
})
It("should adopt matching pods on creation", func() {
/*
Release : v1.13
Testname: Replication Controller, adopt matching pods
Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
*/
framework.ConformanceIt("should adopt matching pods on creation", func() {
testRCAdoptMatchingOrphans(f)
})
It("should release no longer matching pods", func() {
/*
Release : v1.13
Testname: Replication Controller, release pods
Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
*/
framework.ConformanceIt("should release no longer matching pods", func() {
testRCReleaseControlledNotMatching(f)
})
})

View File

@ -103,7 +103,12 @@ var _ = SIGDescribe("ReplicaSet", func() {
testReplicaSetConditionCheck(f)
})
It("should adopt matching pods on creation and release no longer matching pods", func() {
/*
Release : v1.13
Testname: Replica Set, adopt matching pods and release non matching pods
Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references
*/
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() {
testRSAdoptMatchingAndReleaseNotMatching(f)
})
})

View File

@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"strings"
"time"
@ -31,6 +32,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -111,7 +113,7 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying statefulset set proper service name")
framework.ExpectNoError(sst.CheckServiceName(ss, headlessSvcName))
cmd := "echo $(hostname) > /data/hostname; sync;"
cmd := "echo $(hostname) | dd of=/data/hostname conv=fsync"
By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(sst.ExecInStatefulPods(ss, cmd))
@ -248,6 +250,14 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
})
// This can't be Conformance yet because it depends on a default
// StorageClass and a dynamic provisioner.
It("should perform rolling updates and roll backs of template modifications with PVCs", func() {
By("Creating a new StatefulSet with PVCs")
*(ss.Spec.Replicas) = 3
rollbackTest(c, ns, ss)
})
/*
Release : v1.9
Testname: StatefulSet, Rolling Update
@ -256,116 +266,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("should perform rolling updates and roll backs of template modifications", func() {
By("Creating a new StatefulSet")
ss := framework.NewStatefulSet("ss2", ns, headlessSvcName, 3, nil, nil, labels)
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
rollbackTest(c, ns, ss)
})
/*
@ -700,7 +601,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled up in order")
expectedOrder := []string{ssName + "-0", ssName + "-1", ssName + "-2"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
@ -731,7 +634,9 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Verifying that stateful set " + ssName + " was scaled down in reverse order")
expectedOrder = []string{ssName + "-2", ssName + "-1", ssName + "-0"}
_, err = watch.Until(framework.StatefulSetTimeout, watcher, func(event watch.Event) (bool, error) {
ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulSetTimeout)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
return false, nil
}
@ -810,7 +715,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{conflictingPort},
},
},
@ -837,8 +742,10 @@ var _ = SIGDescribe("StatefulSet", func() {
By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
// we need to get UID from pod in any state and wait until stateful set controller will remove pod atleast once
_, err = watch.Until(framework.StatefulPodTimeout, w, func(event watch.Event) (bool, error) {
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.StatefulPodTimeout)
defer cancel()
// we need to get UID from pod in any state and wait until stateful set controller will remove pod at least once
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
@ -862,7 +769,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, thats why we are waiting for running phase like this, and not with watch.Until
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
if err != nil {
@ -1076,7 +983,7 @@ func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv {
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
}
}
@ -1176,3 +1083,119 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
}
return err
}
// This function is used by two tests to test StatefulSet rollbacks: one using
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHttpProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
Expect(err).NotTo(HaveOccurred())
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s created with update revision %s not equal to current revision %s",
ss.Namespace, ss.Name, updateRevision, currentRevision))
pods := sst.GetPodList(ss)
for i := range pods.Items {
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(currentRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to current revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
currentRevision))
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
By(fmt.Sprintf("Updating StatefulSet template: update image from %s to %s", oldImage, newImage))
Expect(oldImage).NotTo(Equal(newImage), "Incorrect test setup: should update to a different image")
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
Expect(err).NotTo(HaveOccurred())
By("Creating a new revision")
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during rolling update")
By("Updating Pods in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(newImage),
fmt.Sprintf(" Pod %s/%s has image %s not have new image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
newImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(updateRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to update revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
updateRevision))
}
By("Rolling back to a previous revision")
err = sst.BreakPodHttpProbe(ss, &pods.Items[1])
Expect(err).NotTo(HaveOccurred())
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
Expect(err).NotTo(HaveOccurred())
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
Expect(currentRevision).NotTo(Equal(updateRevision),
"Current revision should not equal update revision during roll back")
Expect(priorRevision).To(Equal(updateRevision),
"Prior revision should equal update revision during roll back")
By("Rolling back update in reverse ordinal order")
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
sst.RestorePodHttpProbe(ss, &pods.Items[1])
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
Expect(ss.Status.CurrentRevision).To(Equal(priorRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal prior revision %s on rollback completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
updateRevision))
for i := range pods.Items {
Expect(pods.Items[i].Spec.Containers[0].Image).To(Equal(oldImage),
fmt.Sprintf("Pod %s/%s has image %s not equal to previous image %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Spec.Containers[0].Image,
oldImage))
Expect(pods.Items[i].Labels[apps.StatefulSetRevisionLabel]).To(Equal(priorRevision),
fmt.Sprintf("Pod %s/%s revision %s is not equal to prior revision %s",
pods.Items[i].Namespace,
pods.Items[i].Name,
pods.Items[i].Labels[apps.StatefulSetRevisionLabel],
priorRevision))
}
}

View File

@ -31,7 +31,7 @@ var (
CronJobGroupVersionResourceBeta = schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}
NautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
KittenImage = imageutils.GetE2EImage(imageutils.Kitten)
NginxImage = imageutils.GetE2EImage(imageutils.NginxSlim)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxSlimNew)
NginxImage = imageutils.GetE2EImage(imageutils.Nginx)
NewNginxImage = imageutils.GetE2EImage(imageutils.NginxNew)
RedisImage = imageutils.GetE2EImage(imageutils.Redis)
)

View File

@ -12,45 +12,48 @@ go_library(
"certificates.go",
"framework.go",
"metadata_concealment.go",
"node_authn.go",
"node_authz.go",
"pod_security_policy.go",
"service_accounts.go",
],
importpath = "k8s.io/kubernetes/test/e2e/auth",
deps = [
"//pkg/master/ports:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/security/podsecuritypolicy/util:go_default_library",
"//pkg/util/pointer:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/evanphx/json-patch:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@ -4,11 +4,9 @@ reviewers:
- smarterclayton
- sttts
- tallclair
- ericchiang
approvers:
- liggitt
- mikedanese
- smarterclayton
- sttts
- tallclair
- ericchiang

File diff suppressed because it is too large

View File

@ -91,9 +91,10 @@ var _ = SIGDescribe("Certificates API", func() {
framework.Logf("waiting for CSR to be signed")
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
csr, _ = csrs.Get(csrName, metav1.GetOptions{})
csr, err = csrs.Get(csrName, metav1.GetOptions{})
if err != nil {
return false, err
framework.Logf("error getting csr: %v", err)
return false, nil
}
if len(csr.Status.Certificate) == 0 {
framework.Logf("csr not signed yet")

View File

@ -55,10 +55,10 @@ var _ = SIGDescribe("Metadata Concealment", func() {
},
}
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create job (%s:%s)", f.Namespace.Name, job.Name)
By("Ensuring job reaches completions")
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
Expect(err).NotTo(HaveOccurred(), "failed to ensure job completion (%s:%s)", f.Namespace.Name, job.Name)
})
})

108 vendor/k8s.io/kubernetes/test/e2e/auth/node_authn.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
f := framework.NewDefaultFramework("node-authn")
var ns string
var nodeIPs []string
BeforeEach(func() {
ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to list nodes in namespace: %s", ns)
Expect(len(nodeList.Items)).NotTo(BeZero())
pickedNode := nodeList.Items[0]
nodeIPs = framework.GetNodeAddresses(&pickedNode, v1.NodeExternalIP)
// The pods running in the cluster can see the internal addresses.
nodeIPs = append(nodeIPs, framework.GetNodeAddresses(&pickedNode, v1.NodeInternalIP)...)
// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
saName := "default"
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to retrieve service account (%s:%s)", ns, saName)
Expect(len(sa.Secrets)).NotTo(BeZero())
})
It("The kubelet's main port 10250 should reject requests with no credentials", func() {
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
// Anonymous authentication is disabled by default
result := framework.RunHostCmdOrDie(ns, pod.Name, fmt.Sprintf("curl -sIk -o /dev/null -w '%s' https://%s:%v/metrics", "%{http_code}", nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet's main port 10250 should reject requests with no credentials")
}
})
It("The kubelet can delegate ServiceAccount tokens to the API server", func() {
By("create a new ServiceAccount for authentication")
trueValue := true
newSA := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: "node-auth-newSA",
},
AutomountServiceAccountToken: &trueValue,
}
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
Expect(err).NotTo(HaveOccurred(), "failed to create service account (%s:%s)", ns, newSA.Name)
pod := createNodeAuthTestPod(f)
for _, nodeIP := range nodeIPs {
result := framework.RunHostCmdOrDie(ns,
pod.Name,
fmt.Sprintf("curl -sIk -o /dev/null -w '%s' --header \"Authorization: Bearer `%s`\" https://%s:%v/metrics",
"%{http_code}",
"cat /var/run/secrets/kubernetes.io/serviceaccount/token",
nodeIP, ports.KubeletPort))
Expect(result).To(Or(Equal("401"), Equal("403")), "the kubelet can delegate ServiceAccount tokens to the API server")
}
})
})
func createNodeAuthTestPod(f *framework.Framework) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-node-authn-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test-node-authn",
Image: imageutils.GetE2EImage(imageutils.Hostexec),
Command: []string{"sleep 3600"},
}},
RestartPolicy: v1.RestartPolicyNever,
},
}
return f.PodClient().CreateSync(pod)
}
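
A sketch of how the curl probes in this new test are assembled. kubeletCurl builds the command run inside the hostexec pod: -s silences progress, -I sends a HEAD-style request, -k skips TLS verification (the kubelet serves a self-signed certificate by default), and -w '%{http_code}' prints only the status code. The withToken variant reads the projected service-account token from its standard in-pod path. The helper and variable names are illustrative, not part of the framework.

package example

import "fmt"

const tokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token"

// kubeletCurl returns the shell command the test executes against the
// kubelet's secure port, optionally attaching a bearer token.
func kubeletCurl(nodeIP string, port int, withToken bool) string {
	auth := ""
	if withToken {
		auth = fmt.Sprintf(" --header \"Authorization: Bearer `cat %s`\"", tokenPath)
	}
	return fmt.Sprintf("curl -sIk -o /dev/null -w '%%{http_code}'%s https://%s:%d/metrics", auth, nodeIP, port)
}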

View File

@ -51,23 +51,24 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to list nodes in namespace: %s", ns)
Expect(len(nodeList.Items)).NotTo(Equal(0))
nodeName = nodeList.Items[0].Name
asUser = NodeNamePrefix + nodeName
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get("default", metav1.GetOptions{})
saName := "default"
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
Expect(len(sa.Secrets)).NotTo(Equal(0))
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to retrieve service account (%s:%s)", ns, saName)
defaultSaSecret = sa.Secrets[0].Name
By("Creating a kubernetes client that impersonates a node")
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to load kubernetes client config")
config.Impersonate = restclient.ImpersonationConfig{
UserName: asUser,
Groups: []string{NodesGroup},
}
c, err = clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create Clientset for the given config: %+v", *config)
})
It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
@ -97,7 +98,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
},
}
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
_, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
Expect(apierrors.IsForbidden(err)).Should(Equal(true))
})
@ -114,7 +115,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
},
}
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create secret (%s:%s)", ns, secret.Name)
By("Node should not get the secret")
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
@ -147,10 +148,12 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}
_, err = f.ClientSet.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to create pod (%s:%s)", ns, pod.Name)
By("The node should able to access the secret")
err = wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
itv := framework.Poll
dur := 1 * time.Minute
err = wait.Poll(itv, dur, func() (bool, error) {
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
@ -158,7 +161,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "failed to get secret after trying every %v for %v (%s:%s)", itv, dur, ns, secret.Name)
})
It("A node shouldn't be able to create another node", func() {

View File

@ -20,7 +20,6 @@ import (
"fmt"
"k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
policy "k8s.io/api/policy/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@ -32,10 +31,10 @@ import (
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
utilpointer "k8s.io/kubernetes/pkg/util/pointer"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
utilpointer "k8s.io/utils/pointer"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -75,75 +74,35 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
It("should forbid pod creation when no PSP is available", func() {
By("Running a restricted pod")
_, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "restricted"))
_, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
expectForbidden(err)
})
// TODO: merge tests for extensions/policy API groups when PSP will be completely moved out of the extensions
It("should enforce the restricted extensions.PodSecurityPolicy", func() {
By("Creating & Binding a restricted policy for the test service account")
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(f, func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
})
It("should enforce the restricted policy.PodSecurityPolicy", func() {
By("Creating & Binding a restricted policy for the test service account")
_, cleanup := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod(f, "allowed"))
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
expectForbidden(err)
})
})
It("should allow pods under the privileged extensions.PodSecurityPolicy", func() {
By("Creating & Binding a privileged policy for the test service account")
// Ensure that the permissive policy is used even in the presence of the restricted policy.
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
defer cleanup()
testPrivilegedPods(f, func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
// Verify expected PSP was used.
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
Expect(found).To(BeTrue(), "PSP annotation not found")
Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP")
})
})
It("should allow pods under the privileged policy.PodSecurityPolicy", func() {
By("Creating & Binding a privileged policy for the test service account")
// Ensure that the permissive policy is used even in the presence of the restricted policy.
_, cleanup := createAndBindPSPInPolicy(f, restrictedPSPInPolicy("restrictive"))
_, cleanup := createAndBindPSP(f, restrictedPSP("restrictive"))
defer cleanup()
expectedPSP, cleanup := createAndBindPSPInPolicy(f, privilegedPSPInPolicy("permissive"))
expectedPSP, cleanup := createAndBindPSP(f, privilegedPSP("permissive"))
defer cleanup()
testPrivilegedPods(f, func(pod *v1.Pod) {
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
@ -163,16 +122,16 @@ func expectForbidden(err error) {
Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
}
func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
func testPrivilegedPods(tester func(pod *v1.Pod)) {
By("Running a privileged pod", func() {
privileged := restrictedPod(f, "privileged")
privileged := restrictedPod("privileged")
privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
tester(privileged)
})
By("Running a HostPath pod", func() {
hostpath := restrictedPod(f, "hostpath")
hostpath := restrictedPod("hostpath")
hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
Name: "hp",
MountPath: "/hp",
@ -187,26 +146,26 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
})
By("Running a HostNetwork pod", func() {
hostnet := restrictedPod(f, "hostnet")
hostnet := restrictedPod("hostnet")
hostnet.Spec.HostNetwork = true
tester(hostnet)
})
By("Running a HostPID pod", func() {
hostpid := restrictedPod(f, "hostpid")
hostpid := restrictedPod("hostpid")
hostpid.Spec.HostPID = true
tester(hostpid)
})
By("Running a HostIPC pod", func() {
hostipc := restrictedPod(f, "hostipc")
hostipc := restrictedPod("hostipc")
hostipc.Spec.HostIPC = true
tester(hostipc)
})
if common.IsAppArmorSupported() {
By("Running a custom AppArmor profile pod", func() {
aa := restrictedPod(f, "apparmor")
aa := restrictedPod("apparmor")
// Every node is expected to have the docker-default profile.
aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
tester(aa)
@ -214,13 +173,13 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
}
By("Running an unconfined Seccomp pod", func() {
unconfined := restrictedPod(f, "seccomp")
unconfined := restrictedPod("seccomp")
unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
tester(unconfined)
})
By("Running a SYS_ADMIN pod", func() {
sysadmin := restrictedPod(f, "sysadmin")
sysadmin := restrictedPod("sysadmin")
sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
Add: []v1.Capability{"SYS_ADMIN"},
}
@ -229,49 +188,8 @@ func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
})
}
func createAndBindPSP(f *framework.Framework, pspTemplate *extensionsv1beta1.PodSecurityPolicy) (psp *extensionsv1beta1.PodSecurityPolicy, cleanup func()) {
// Create the PodSecurityPolicy object.
psp = pspTemplate.DeepCopy()
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
ns := f.Namespace.Name
name := fmt.Sprintf("%s-%s", ns, psp.Name)
psp.Name = name
psp, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
framework.ExpectNoError(err, "Failed to create PSP")
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{"extensions"},
Resources: []string{"podsecuritypolicies"},
ResourceNames: []string{name},
Verbs: []string{"use"},
}},
})
framework.ExpectNoError(err, "Failed to create PSP role")
// Bind the role to the namespace.
framework.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: ns,
Name: "default",
})
framework.ExpectNoError(framework.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
return psp, func() {
// Cleanup non-namespaced PSP object.
f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
}
}
// createAndBindPSPInPolicy creates a PSP in the policy API group (unlike createAndBindPSP()).
// TODO: merge these functions when PSP will be completely moved out of the extensions
func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSecurityPolicy) (psp *policy.PodSecurityPolicy, cleanup func()) {
// createAndBindPSP creates a PSP in the policy API group.
func createAndBindPSP(f *framework.Framework, pspTemplate *policy.PodSecurityPolicy) (psp *policy.PodSecurityPolicy, cleanup func()) {
// Create the PodSecurityPolicy object.
psp = pspTemplate.DeepCopy()
// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
@ -311,7 +229,7 @@ func createAndBindPSPInPolicy(f *framework.Framework, pspTemplate *policy.PodSec
}
}
func restrictedPod(f *framework.Framework, name string) *v1.Pod {
func restrictedPod(name string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -334,8 +252,7 @@ func restrictedPod(f *framework.Framework, name string) *v1.Pod {
}
// privilegedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that allows everything.
// TODO: replace by PrivilegedPSP when PSP will be completely moved out of the extensions
func privilegedPSPInPolicy(name string) *policy.PodSecurityPolicy {
func privilegedPSP(name string) *policy.PodSecurityPolicy {
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -368,8 +285,7 @@ func privilegedPSPInPolicy(name string) *policy.PodSecurityPolicy {
}
// restrictedPSPInPolicy creates a PodSecurityPolicy (in the "policy" API Group) that is most strict.
// TODO: replace by restrictedPSP when PSP will be completely moved out of the extensions
func restrictedPSPInPolicy(name string) *policy.PodSecurityPolicy {
func restrictedPSP(name string) *policy.PodSecurityPolicy {
return &policy.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
@ -423,61 +339,6 @@ func restrictedPSPInPolicy(name string) *policy.PodSecurityPolicy {
}
}
// restrictedPSP creates a PodSecurityPolicy that is most strict.
func restrictedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
return &extensionsv1beta1.PodSecurityPolicy{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
seccomp.AllowedProfilesAnnotationKey: v1.SeccompProfileRuntimeDefault,
seccomp.DefaultProfileAnnotationKey: v1.SeccompProfileRuntimeDefault,
apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
apparmor.DefaultProfileAnnotationKey: apparmor.ProfileRuntimeDefault,
},
},
Spec: extensionsv1beta1.PodSecurityPolicySpec{
Privileged: false,
AllowPrivilegeEscalation: utilpointer.BoolPtr(false),
RequiredDropCapabilities: []v1.Capability{
"AUDIT_WRITE",
"CHOWN",
"DAC_OVERRIDE",
"FOWNER",
"FSETID",
"KILL",
"MKNOD",
"NET_RAW",
"SETGID",
"SETUID",
"SYS_CHROOT",
},
Volumes: []extensionsv1beta1.FSType{
extensionsv1beta1.ConfigMap,
extensionsv1beta1.EmptyDir,
extensionsv1beta1.PersistentVolumeClaim,
"projected",
extensionsv1beta1.Secret,
},
HostNetwork: false,
HostIPC: false,
HostPID: false,
RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
Rule: extensionsv1beta1.RunAsUserStrategyMustRunAsNonRoot,
},
SELinux: extensionsv1beta1.SELinuxStrategyOptions{
Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
},
SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
},
FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
},
ReadOnlyRootFilesystem: false,
},
}
}
func boolPtr(b bool) *bool {
return &b
}
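
For reference, a sketch of the RBAC rule that createAndBindPSP sets up after this commit: a namespaced Role granting the "use" verb on one named PodSecurityPolicy, assuming the rule mirrors the removed extensions-group version with the API group swapped to "policy". Names (ns, pspName) are placeholders.

package example

import (
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// usePSPRole allows subjects bound to it (here the namespace's default
// service account) to run pods validated by the named policy.
func usePSPRole(ns, pspName string) *rbacv1beta1.Role {
	return &rbacv1beta1.Role{
		ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: pspName},
		Rules: []rbacv1beta1.PolicyRule{{
			APIGroups:     []string{"policy"},
			Resources:     []string{"podsecuritypolicies"},
			ResourceNames: []string{pspName},
			Verbs:         []string{"use"},
		}},
	}
}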

View File

@ -153,6 +153,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})
/*
Release: v1.9
Testname: Service Account Tokens Must AutoMount
Description: Ensure that Service Account keys are mounted into the Container. The Pod
contains three containers, each of which reads the Service Account token,
the root CA, and the default namespace respectively from the default API
token mount path. All three files MUST exist and the Service
Account mount path MUST be auto mounted to the Container.
*/
framework.ConformanceIt("should mount an API token into pods ", func() {
var tokenContent string
var rootCAContent string
@ -235,7 +244,33 @@ var _ = SIGDescribe("ServiceAccounts", func() {
})
})
/*
Release: v1.9
Testname: Service account tokens auto mount optionally
Description: Ensure that Service Account keys are mounted into the Pod only
when AutomountServiceAccountToken is not set to false. We test the
following scenarios here.
1. Create Pod, Pod Spec has AutomountServiceAccountToken set to nil
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
2. Create Pod, Pod Spec has AutomountServiceAccountToken set to true
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
3. Create Pod, Pod Spec has AutomountServiceAccountToken set to false
a) Service Account with default value,
b) Service Account is configured with AutomountServiceAccountToken set to true,
c) Service Account is configured with AutomountServiceAccountToken set to false
The Containers running in these pods MUST verify that the ServiceTokenVolume path is
auto mounted only when the Pod Spec does not set AutomountServiceAccountToken to false
and, when the Pod Spec leaves it unset, the ServiceAccount object does not set it to
false; this covers test cases 1a, 1b, 2a, 2b and 2c.
In test cases 1c, 3a, 3b and 3c the ServiceTokenVolume MUST NOT be auto mounted.
*/
framework.ConformanceIt("should allow opting out of API token automount ", func() {
var err error
trueValue := true
falseValue := false
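
A compact restatement of the matrix in the conformance description above, under the assumption that it reduces to a simple precedence rule: the pod spec's AutomountServiceAccountToken wins when set, otherwise the service account's setting applies, and leaving both unset defaults to mounting the token.

package example

// shouldAutomount mirrors the 1a-3c cases: nil means "unset".
func shouldAutomount(podSetting, saSetting *bool) bool {
	if podSetting != nil {
		return *podSetting // cases 2a-2c and 3a-3c: pod spec takes precedence
	}
	if saSetting != nil {
		return *saSetting // cases 1b and 1c: fall back to the service account
	}
	return true // case 1a: both unset, token is mounted by default
}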

View File

@ -19,36 +19,36 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/autoscaling",
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/scheduling/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/scheduling/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -33,9 +33,9 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)
const (
@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
break
}
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("should scale up at all [Feature:ClusterAutoscalerScalability1]", func() {
@ -170,7 +170,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
replicas1 := additionalNodes1 * replicasPerNode
replicas2 := additionalNodes2 * replicasPerNode
glog.Infof("cores per node: %v", coresPerNode)
klog.Infof("cores per node: %v", coresPerNode)
// saturate cluster
initialReplicas := nodeCount
@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer reservationCleanup()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
glog.Infof("Reserved successfully")
klog.Infof("Reserved successfully")
// configure pending pods & expected scale up #1
rcConfig := reserveMemoryRCConfig(f, "extra-pod-1", replicas1, additionalNodes1*perNodeReservation, largeScaleUpTimeout)
@ -191,7 +191,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
testCleanup1 := simpleScaleUpTestWithTolerance(f, config, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup1()
glog.Infof("Scaled up once")
klog.Infof("Scaled up once")
// configure pending pods & expected scale up #2
rcConfig2 := reserveMemoryRCConfig(f, "extra-pod-2", replicas2, additionalNodes2*perNodeReservation, largeScaleUpTimeout)
@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
testCleanup2 := simpleScaleUpTestWithTolerance(f, config2, tolerateUnreadyNodes, tolerateUnreadyPods)
defer testCleanup2()
glog.Infof("Scaled up twice")
klog.Infof("Scaled up twice")
})
It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func() {
@ -327,7 +327,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
By("Checking if the number of nodes is as expected")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
glog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
klog.Infof("Nodes: %v, expected: %v", len(nodes.Items), totalNodes)
Expect(len(nodes.Items)).Should(Equal(totalNodes))
})
@ -368,26 +368,6 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
})
func makeUnschedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeUnschedulable(f.ClientSet, &node)
if err != nil {
return err
}
}
return nil
}
func makeSchedulable(f *framework.Framework, nodes []v1.Node) error {
for _, node := range nodes {
err := makeNodeSchedulable(f.ClientSet, &node, false)
if err != nil {
return err
}
}
return nil
}
func anyKey(input map[string]int) string {
for k := range input {
return k
@ -410,7 +390,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
} else {
framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
}
glog.Infof("cluster is increased")
klog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 {
framework.ExpectNoError(waitForCaPodsReadyInNamespace(f, f.ClientSet, tolerateMissingPodCount))
} else {
@ -547,5 +527,5 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
func timeTrack(start time.Time, name string) {
elapsed := time.Since(start)
glog.Infof("%s took %s", name, elapsed)
klog.Infof("%s took %s", name, elapsed)
}
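
Most of this file's change is mechanical: k8s.io/klog is Kubernetes' fork of glog with the same logging calls, so the migration is an import swap plus renaming the package qualifier, roughly as in this sketch.

package example

import "k8s.io/klog"

// logElapsed is equivalent to the old glog.Infof call it replaces.
func logElapsed(name string, seconds float64) {
	klog.Infof("%s took %.1fs", name, seconds)
}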

View File

@ -22,6 +22,7 @@ import (
"io/ioutil"
"math"
"net/http"
"os"
"os/exec"
"regexp"
"strconv"
@ -47,9 +48,9 @@ import (
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)
const (
@ -82,6 +83,8 @@ const (
expendablePriorityClassName = "expendable-priority"
highPriorityClassName = "high-priority"
gpuLabel = "cloud.google.com/gke-accelerator"
)
var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
@ -112,8 +115,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
nodeCount = len(nodes.Items)
coreCount = 0
for _, node := range nodes.Items {
quentity := node.Status.Capacity[v1.ResourceCPU]
coreCount += quentity.Value()
quantity := node.Status.Allocatable[v1.ResourceCPU]
coreCount += quantity.Value()
}
By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
Expect(nodeCount).NotTo(BeZero())
@ -129,16 +132,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
}
})
AfterEach(func() {
if framework.ProviderIs("gke") {
By("Remove changes introduced by NAP tests")
removeNAPNodePools()
disableAutoprovisioning()
}
framework.SkipUnlessProviderIs("gce", "gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
expectedNodes := 0
@ -163,7 +161,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
break
}
glog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
klog.Infof("Made nodes schedulable again in %v", time.Since(s).String())
})
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@ -207,107 +205,123 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]",
func() { simpleScaleUpTest(0) })
supportedGpuTypes := []string{"nvidia-tesla-k80", "nvidia-tesla-v100", "nvidia-tesla-p100"}
for _, gpuType := range supportedGpuTypes {
gpuType := gpuType // create new variable for each iteration step
gpuType := os.Getenv("TESTED_GPU_TYPE")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
})
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
defer disableAutoscaler(gpuPoolName, 0, 2)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
By("Scale GPU deployment")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(2))
})
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 0)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
// Verify that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
// Expect gpu pool to stay intact
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
framework.SkipUnlessProviderIs("gke")
if gpuType == "" {
framework.Failf("TEST_GPU_TYPE not defined")
return
}
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
const gpuPoolName = "gpu-pool"
addGpuNodePool(gpuPoolName, gpuType, 1, 1)
defer deleteNodePool(gpuPoolName)
installNvidiaDriversDaemonSet()
installNvidiaDriversDaemonSet()
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(scheduleGpuPod(f, "gpu-pod-rc"))
By("Schedule a single pod which requires GPU")
framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Enable autoscaler")
framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
defer disableAutoscaler(gpuPoolName, 0, 1)
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(1))
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
By("Remove the only POD requiring GPU")
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
}
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
Expect(len(getPoolNodes(f, gpuPoolName))).Should(Equal(0))
})
It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]",
func() {
@ -335,10 +349,16 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
By("Expect no more scale-up to be happening after all pods are scheduled")
status, err = getScaleUpStatus(c)
// wait for a while until scale-up finishes; we cannot read CA status immediately
// after pods are scheduled as status config map is updated by CA once every loop iteration
status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
return s.status == caNoScaleUpStatus
}, 2*freshStatusLimit)
framework.ExpectNoError(err)
if status.target != target {
glog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
klog.Warningf("Final number of nodes (%v) does not match initial scale-up target (%v).", status.target, target)
}
Expect(status.timestamp.Add(freshStatusLimit).Before(time.Now())).Should(Equal(false))
Expect(status.status).Should(Equal(caNoScaleUpStatus))
@ -355,14 +375,17 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+extraNodes, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
// We wait for nodes to become schedulable to make sure the new nodes
// will be returned by getPoolNodes below.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, resizeTimeout))
klog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Getting memory available on new nodes, so we can account for it when creating RC")
nodes := getPoolNodes(f, extraPoolName)
Expect(len(nodes)).Should(Equal(extraNodes))
extraMemMb := 0
for _, node := range nodes {
mem := node.Status.Capacity[v1.ResourceMemory]
mem := node.Status.Allocatable[v1.ResourceMemory]
extraMemMb += int((&mem).Value() / 1024 / 1024)
}
@ -491,7 +514,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
defer func() {
framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
glog.Infof("RC and pods not using volume deleted")
klog.Infof("RC and pods not using volume deleted")
}()
By("waiting for all pods before triggering scale up")
@ -565,14 +588,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
newNodesSet.Delete(nodes...)
if len(newNodesSet) > 1 {
By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
glog.Infof("Usually only 1 new node is expected, investigating")
glog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
klog.Infof("Usually only 1 new node is expected, investigating")
klog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
if output, err := exec.Command("gcloud", "compute", "instances", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
glog.Infof("Gcloud compute instances list: %s", output)
klog.Infof("Gcloud compute instances list: %s", output)
} else {
glog.Errorf("Failed to get instances list: %v", err)
klog.Errorf("Failed to get instances list: %v", err)
}
for newNode := range newNodesSet {
@ -580,9 +603,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
newNode,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
glog.Infof("Gcloud compute instances describe: %s", output)
klog.Infof("Gcloud compute instances describe: %s", output)
} else {
glog.Errorf("Failed to get instances describe: %v", err)
klog.Errorf("Failed to get instances describe: %v", err)
}
}
@ -597,7 +620,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
glog.Errorf("Failed to get node %v: %v", nodeName, err)
klog.Errorf("Failed to get node %v: %v", nodeName, err)
}
}
By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
@ -858,8 +881,21 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)
}
// If new nodes are disconnected too soon, they'll be considered not started
// instead of unready, and cluster won't be considered unhealthy.
//
// More precisely, Cluster Autoscaler compares last transition time of
// several readiness conditions to node create time. If it's within
// 2 minutes, it'll assume node is just starting and not unhealthy.
//
// Nodes become ready in less than 1 minute after being created,
// so waiting extra 2 minutes before breaking them (which triggers
// readiness condition transition) should be sufficient, while
// making no assumptions about minimal node startup time.
time.Sleep(2 * time.Minute)
By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Floor(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
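
A sketch of the heuristic the comment above describes: Cluster Autoscaler treats a node whose readiness condition last changed within roughly two minutes of its creation as "still starting" rather than unready, so the test sleeps past that window before cutting connectivity. The constant and helper below are illustrative, not Cluster Autoscaler's actual code.

package example

import "time"

const startupGracePeriod = 2 * time.Minute

// countsAsUnready returns true only once the readiness transition happened
// outside the assumed startup grace window measured from node creation.
func countsAsUnready(created, lastTransition time.Time) bool {
	return lastTransition.Sub(created) > startupGracePeriod
}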
@ -894,106 +930,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
It("should add new node and new node pool on too big pod, scale down to 1 and scale down to 0 [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
By("Create first pod")
cleanupFunc1 := ReserveMemory(f, "memory-reservation1", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
defer func() {
if cleanupFunc1 != nil {
cleanupFunc1()
}
}()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
By("Create second pod")
cleanupFunc2 := ReserveMemory(f, "memory-reservation2", 1, int(1.1*float64(memAllocatableMb)), true, defaultTimeout)
defer func() {
if cleanupFunc2 != nil {
cleanupFunc2()
}
}()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+2 }, defaultTimeout))
By("Delete first pod")
cleanupFunc1()
cleanupFunc1 = nil
By("Waiting for scale down to 1")
// Verify that cluster size decreased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleDownTimeout))
By("Delete second pod")
cleanupFunc2()
cleanupFunc2 = nil
By("Waiting for scale down to 0")
// Verify that cluster size decreased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, scaleDownTimeout))
By("Waiting for NAP group remove")
framework.ExpectNoError(waitTillAllNAPNodePoolsAreRemoved())
By("Check if NAP group was removeed")
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
})
It("shouldn't add new node group if not needed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
By("Create pods")
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemory(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout)
defer cleanupFunc()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
By("Check if NAP group was created hoping id didn't happen")
Expect(getNAPNodePoolsNumber()).Should(Equal(0))
})
It("shouldn't scale up if cores limit too low, should scale up after limit is changed [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
By(fmt.Sprintf("Set core limit to %d", coreCount))
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount)))
// Create pod allocating 1.1 allocatable for present nodes. Bigger node will have to be created.
cleanupFunc := ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, time.Second)
defer cleanupFunc()
By(fmt.Sprintf("Waiting for scale up hoping it won't happen, sleep for %s", scaleUpTimeout.String()))
time.Sleep(scaleUpTimeout)
// Verify that cluster size is not changed
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount }, time.Second))
By("Change resource limits")
framework.ExpectNoError(enableAutoprovisioning(fmt.Sprintf(`"resource_limits":{"name":"cpu", "minimum":2, "maximum":%d}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}`, coreCount+5)))
By("Wait for scale up")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
})
It("should create new node if there is no node for node selector [Feature:ClusterSizeAutoscalingScaleWithNAP]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoprovisioning(""))
// Create pod allocating 0.7 allocatable for present nodes with node selector.
cleanupFunc := ReserveMemoryWithSelector(f, "memory-reservation", 1, int(0.7*float64(memAllocatableMb)), true, scaleUpTimeout, map[string]string{"test": "test"})
defer cleanupFunc()
By("Waiting for scale up")
// Verify that cluster size increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size == nodeCount+1 }, defaultTimeout))
By("Check if NAP group was created")
Expect(getNAPNodePoolsNumber()).Should(Equal(1))
})
It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
@ -1006,8 +943,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@ -1018,8 +953,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
@ -1032,8 +965,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@ -1045,8 +976,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
framework.SkipUnlessProviderIs("gce")
defer createPriorityClasses(f)()
increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@ -1066,7 +995,7 @@ func installNvidiaDriversDaemonSet() {
}
func execCmd(args ...string) *exec.Cmd {
glog.Infof("Executing: %s", strings.Join(args, " "))
klog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
}
@ -1198,7 +1127,7 @@ func isRegionalCluster() bool {
}
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
glog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
klog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--enable-autoscaling",
@ -1208,10 +1137,10 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed config update result: %s", output)
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to enable autoscaling: %v", err)
}
glog.Infof("Config update result: %s", output)
klog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
@ -1225,17 +1154,17 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
}
func disableAutoscaler(nodePool string, minCount, maxCount int) error {
glog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
klog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
args := []string{"container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--no-enable-autoscaling",
"--node-pool=" + nodePool}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed config update result: %s", output)
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to disable autoscaling: %v", err)
}
glog.Infof("Config update result: %s", output)
klog.Infof("Config update result: %s", output)
var finalErr error
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
@ -1248,17 +1177,6 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
return fmt.Errorf("autoscaler still enabled, last error: %v", finalErr)
}
func isAutoprovisioningEnabled() (bool, error) {
strBody, err := getCluster("v1alpha1")
if err != nil {
return false, err
}
if strings.Contains(strBody, "\"enableNodeAutoprovisioning\": true") {
return true, nil
}
return false, nil
}
func executeHTTPRequest(method string, url string, body string) (string, error) {
client := &http.Client{}
req, err := http.NewRequest(method, url, strings.NewReader(body))
@ -1278,133 +1196,13 @@ func executeHTTPRequest(method string, url string, body string) (string, error)
return string(respBody), nil
}
func enableAutoprovisioning(resourceLimits string) error {
By("Using API to enable autoprovisioning.")
var body string
if resourceLimits != "" {
body = fmt.Sprintf(`{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, %s}}}`, resourceLimits)
} else {
body = `{"update": {"desired_cluster_autoscaling": {"enable_node_autoprovisioning": true, "resource_limits":{"name":"cpu", "minimum":0, "maximum":100}, "resource_limits":{"name":"memory", "minimum":0, "maximum":10000000}}}}`
}
_, err := executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), body)
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
glog.Infof("Wait for enabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if enabled {
By("Autoprovisioning enabled.")
return nil
}
glog.Infof("Waiting for enabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't enabled (timeout).")
}
func disableAutoprovisioning() error {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Using API to disable autoprovisioning.")
_, err = executeHTTPRequest(http.MethodPut, getGKEClusterURL("v1alpha1"), "{\"update\": {\"desired_cluster_autoscaling\": {}}}")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
By("Wait for disabling autoprovisioning.")
for start := time.Now(); time.Since(start) < gkeUpdateTimeout; time.Sleep(30 * time.Second) {
enabled, err := isAutoprovisioningEnabled()
if err != nil {
glog.Errorf("Error: %s", err.Error())
return err
}
if !enabled {
By("Autoprovisioning disabled.")
return nil
}
By("Waiting for disabling autoprovisioning")
}
return fmt.Errorf("autoprovisioning wasn't disabled (timeout).")
}
func getNAPNodePools() ([]string, error) {
if framework.ProviderIs("gke") {
args := []string{"container", "node-pools", "list", "--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Errorf("Failed to get instance groups: %v", string(output))
return nil, err
}
re := regexp.MustCompile("nap.* ")
lines := re.FindAllString(string(output), -1)
for i, line := range lines {
lines[i] = line[:strings.Index(line, " ")]
}
return lines, nil
} else {
return nil, fmt.Errorf("provider does not support NAP")
}
}
func removeNAPNodePools() error {
By("Remove NAP node pools")
pools, err := getNAPNodePools()
if err != nil {
return err
}
for _, pool := range pools {
By("Remove node pool: " + pool)
suffix := fmt.Sprintf("projects/%s/zones/%s/clusters/%s/nodePools/%s",
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster,
pool)
_, err := executeHTTPRequest(http.MethodDelete, getGKEURL("v1alpha1", suffix), "")
if err != nil {
glog.Errorf("Request error: %s", err.Error())
return err
}
}
err = waitTillAllNAPNodePoolsAreRemoved()
if err != nil {
glog.Errorf(fmt.Sprintf("Couldn't remove NAP groups: %s", err.Error()))
}
return err
}
func getNAPNodePoolsNumber() int {
groups, err := getNAPNodePools()
framework.ExpectNoError(err)
return len(groups)
}
func waitTillAllNAPNodePoolsAreRemoved() error {
By("Wait till all NAP node pools are removed")
err := wait.PollImmediate(5*time.Second, defaultTimeout, func() (bool, error) {
return getNAPNodePoolsNumber() == 0, nil
})
return err
}
func addNodePool(name string, machineType string, numNodes int) {
args := []string{"container", "node-pools", "create", name, "--quiet",
"--machine-type=" + machineType,
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
@ -1414,12 +1212,12 @@ func addGpuNodePool(name string, gpuType string, gpuCount int, numNodes int) {
"--num-nodes=" + strconv.Itoa(numNodes),
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Creating node-pool %s: %s", name, output)
klog.Infof("Creating node-pool %s: %s", name, output)
framework.ExpectNoError(err, string(output))
}
func deleteNodePool(name string) {
glog.Infof("Deleting node pool %s", name)
klog.Infof("Deleting node pool %s", name)
args := []string{"container", "node-pools", "delete", name, "--quiet",
"--cluster=" + framework.TestContext.CloudConfig.Cluster}
err := wait.ExponentialBackoff(
@ -1427,10 +1225,10 @@ func deleteNodePool(name string) {
func() (bool, error) {
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
if err != nil {
glog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
klog.Warningf("Error deleting nodegroup - error:%v, output: %s", err, output)
return false, nil
}
glog.Infof("Node-pool deletion output: %s", output)
klog.Infof("Node-pool deletion output: %s", output)
return true, nil
})
framework.ExpectNoError(err)
@ -1456,7 +1254,7 @@ func getPoolInitialSize(poolName string) int {
"--cluster=" + framework.TestContext.CloudConfig.Cluster,
"--format=value(initialNodeCount)"}
output, err := execCmd(getGcloudCommand(args)...).CombinedOutput()
glog.Infof("Node-pool initial size: %s", output)
klog.Infof("Node-pool initial size: %s", output)
framework.ExpectNoError(err, string(output))
fields := strings.Fields(string(output))
Expect(len(fields)).Should(Equal(1))
@ -1504,7 +1302,7 @@ func doPut(url, content string) (string, error) {
return strBody, nil
}
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, priorityClassName string) func() error {
func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration, priorityClassName string) func() error {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
@ -1517,12 +1315,13 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
Replicas: replicas,
MemRequest: request,
NodeSelector: selector,
Tolerations: tolerations,
PriorityClassName: priorityClassName,
}
for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
err := framework.RunRC(*config)
if err != nil && strings.Contains(err.Error(), "Error creating replication controller") {
glog.Warningf("Failed to create memory reservation: %v", err)
klog.Warningf("Failed to create memory reservation: %v", err)
continue
}
if expectRunning {
@ -1539,19 +1338,19 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
// ReserveMemoryWithPriority creates a replication controller with pods with priority that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithPriority(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, priorityClassName string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, priorityClassName)
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, priorityClassName)
}
// ReserveMemoryWithSelectorAndTolerations creates a replication controller with pods with node selector and tolerations that, in summation,
// request the specified amount of memory.
func ReserveMemoryWithSelector(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, "")
func ReserveMemoryWithSelectorAndTolerations(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration, selector map[string]string, tolerations []v1.Toleration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, selector, tolerations, "")
}
// ReserveMemory creates a replication controller with pods that, in summation,
// request the specified amount of memory.
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool, timeout time.Duration) func() error {
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, "")
return reserveMemory(f, id, replicas, megabytes, expectRunning, timeout, nil, nil, "")
}
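A minimal sketch (not part of this diff; the id, label key and taint values are hypothetical) of how a test might use the new tolerations parameter via ReserveMemoryWithSelectorAndTolerations:
	// Reserve memory only on nodes carrying a hypothetical dedicated label/taint.
	cleanup := ReserveMemoryWithSelectorAndTolerations(f, "memory-reservation", 1, 1024, true, scaleUpTimeout,
		map[string]string{"dedicated": "autoscaling-test"},
		[]v1.Toleration{{
			Key:      "dedicated",
			Operator: v1.TolerationOpEqual,
			Value:    "autoscaling-test",
			Effect:   v1.TaintEffectNoSchedule,
		}})
	defer cleanup() // best-effort cleanup of the reservation RC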
// WaitForClusterSizeFunc waits until the cluster size matches the given function.
@ -1566,7 +1365,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
glog.Warningf("Failed to list nodes: %v", err)
klog.Warningf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
@ -1578,10 +1377,10 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
numReady := len(nodes.Items)
if numNodes == numReady+expectedUnready && sizeFunc(numNodes) {
glog.Infof("Cluster has reached the desired size")
klog.Infof("Cluster has reached the desired size")
return nil
}
glog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
klog.Infof("Waiting for cluster with func, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
@ -1604,21 +1403,21 @@ func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface
// Failed pods in this context generally mean that they have been
// double scheduled onto a node, but then failed a constraint check.
if pod.Status.Phase == v1.PodFailed {
glog.Warningf("Pod has failed: %v", pod)
klog.Warningf("Pod has failed: %v", pod)
}
if !ready && pod.Status.Phase != v1.PodFailed {
notready = append(notready, pod.Name)
}
}
if len(notready) <= tolerateUnreadyCount {
glog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
klog.Infof("sufficient number of pods ready. Tolerating %d unready", tolerateUnreadyCount)
return nil
}
glog.Infof("Too many pods are not ready yet: %v", notready)
klog.Infof("Too many pods are not ready yet: %v", notready)
}
glog.Info("Timeout on waiting for pods being ready")
glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
klog.Info("Timeout on waiting for pods being ready")
klog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
klog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
// Some pods are still not running.
return fmt.Errorf("Too many pods are still not running: %v", notready)
@ -1633,11 +1432,11 @@ func getAnyNode(c clientset.Interface) *v1.Node {
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
glog.Errorf("Failed to get node list: %v", err)
klog.Errorf("Failed to get node list: %v", err)
return nil
}
if len(nodes.Items) == 0 {
glog.Errorf("No nodes")
klog.Errorf("No nodes")
return nil
}
return &nodes.Items[0]
@ -1696,7 +1495,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
if !errors.IsConflict(err) {
return err
}
glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to taint node in allowed number of retries")
}
@ -1737,12 +1536,18 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
if !errors.IsConflict(err) {
return err
}
glog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
klog.Warningf("Got 409 conflict when trying to taint node, retries left: %v", 3-j)
}
return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
}
func scheduleGpuPod(f *framework.Framework, id string) error {
// ScheduleAnySingleGpuPod schedules a pod which requires single GPU of any type
func ScheduleAnySingleGpuPod(f *framework.Framework, id string) error {
return ScheduleGpuPod(f, id, "", 1)
}
// ScheduleGpuPod schedules a pod which requires a given number of gpus of given type
func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit int64) error {
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
@ -1751,10 +1556,14 @@ func scheduleGpuPod(f *framework.Framework, id string) error {
Timeout: 3 * scaleUpTimeout, // spinning up GPU node is slow
Image: imageutils.GetPauseImageName(),
Replicas: 1,
GpuLimit: 1,
GpuLimit: gpuLimit,
Labels: map[string]string{"requires-gpu": "yes"},
}
if gpuType != "" {
config.NodeSelector = map[string]string{gpuLabel: gpuType}
}
err := framework.RunRC(*config)
if err != nil {
return err
@ -1906,7 +1715,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
if !errors.IsConflict(err) {
return err
}
glog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
if err != nil {
return err
@ -1957,7 +1766,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin
}
resized := setMigSizes(newSizes)
if resized {
glog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
klog.Warning("Unexpected node group size while waiting for cluster resize. Setting size to target again.")
}
return false
}
@ -2062,7 +1871,7 @@ func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) {
}
result.target += newTarget
}
glog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
klog.Infof("Cluster-Autoscaler scale-up status: %v (%v, %v)", result.status, result.ready, result.target)
return &result, nil
}
@ -2100,7 +1909,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
glog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
finalErr = err
}
}
@ -2152,12 +1961,18 @@ func createPriorityClasses(f *framework.Framework) func() {
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Create(&schedulerapi.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
klog.Errorf("Error creating priority class: %v", err)
}
Expect(err == nil || errors.IsAlreadyExists(err)).To(Equal(true))
}
return func() {
for className := range priorityClasses {
f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
err := f.ClientSet.SchedulingV1beta1().PriorityClasses().Delete(className, nil)
if err != nil {
klog.Errorf("Error deleting priority class: %v", err)
}
}
}
}
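As a hedged usage sketch (the pod ids, GPU type string and GPU count are placeholders, not values from this diff), the reworked GPU helpers above can be driven from a test body like this:
	// Schedule one pod that asks for a single GPU of any type...
	framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-any"))
	// ...or pin the pod to a specific accelerator type via the gpuLabel node selector.
	framework.ExpectNoError(ScheduleGpuPod(f, "gpu-pod-typed", "example-gpu-type", 2))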

View File

@ -86,7 +86,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
initialReplicas := 2
// metric should cause scale down
metricValue := externalMetricValue
metricTarget := 2 * metricValue
metricTarget := 3 * metricValue
metricTargets := map[string]externalMetricTarget{
"target": {
value: metricTarget,
@ -109,7 +109,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
initialReplicas := 2
// metric should cause scale down
metricValue := externalMetricValue
metricAverageTarget := 2 * metricValue
metricAverageTarget := 3 * metricValue
metricTargets := map[string]externalMetricTarget{
"target_average": {
value: metricAverageTarget,

View File

@ -46,7 +46,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
})
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via deployments
// CPU tests via ReplicaSets
It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
})
@ -96,13 +96,13 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
// HPAScaleTest struct is used by the scale(...) function.
type HPAScaleTest struct {
initPods int32
totalInitialCPUUsage int32
initPods int
totalInitialCPUUsage int
perPodCPURequest int64
targetCPUUtilizationPercent int32
minPods int32
maxPods int32
firstScale int32
firstScale int
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
@ -116,13 +116,14 @@ type HPAScaleTest struct {
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
}
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
@ -157,7 +158,7 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
}
scaleTest := &HPAScaleTest{
initPods: 5,
totalInitialCPUUsage: 375,
totalInitialCPUUsage: 325,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 30,
minPods: 1,

View File

@ -12,6 +12,7 @@ go_library(
"autoscaling_utils.go",
"configmap.go",
"configmap_volume.go",
"container.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
@ -21,51 +22,69 @@ go_library(
"expansion.go",
"host_path.go",
"init_container.go",
"kubelet.go",
"kubelet_etc_hosts.go",
"lifecycle_hook.go",
"networking.go",
"node_lease.go",
"pods.go",
"privileged.go",
"projected.go",
"projected_combined.go",
"projected_configmap.go",
"projected_downwardapi.go",
"projected_secret.go",
"runtime.go",
"secrets.go",
"secrets_volume.go",
"security_context.go",
"sysctl.go",
"ttlafterfinished.go",
"util.go",
"volumes.go",
],
importpath = "k8s.io/kubernetes/test/e2e/common",
deps = [
"//pkg/api/v1/node:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/util/slice:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/coordination/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -117,7 +117,7 @@ done`, testCmd)
Affinity: loaderAffinity,
Containers: []api.Container{{
Name: "test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,

View File

@ -43,7 +43,7 @@ import (
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMillicores = 100
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80
@ -359,6 +359,10 @@ func (rc *ResourceConsumer) GetReplicas() int {
return 0
}
func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
}
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
@ -369,13 +373,25 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
}
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration) {
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
}
func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
framework.Logf("expecting there to be %d replicas (are: %d)", desiredReplicas, replicas)
if replicas != desiredReplicas {
return false, fmt.Errorf("number of replicas changed unexpectedly")
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
as, err := rc.GetHpa(hpaName)
if err != nil {
framework.Logf("Error getting HPA: %s", err)
} else {
framework.Logf("HPA status: %+v", as.Status)
}
if replicas < minDesiredReplicas {
return false, fmt.Errorf("number of replicas below target")
} else if replicas > maxDesiredReplicas {
return false, fmt.Errorf("number of replicas above target")
} else {
return false, nil // Expected number of replicas found. Continue polling until timeout.
}
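A short usage sketch of the new range-based check defined above; the replica counts, durations and HPA name are illustrative only:
	// Wait for the HPA to reach 3 replicas, then tolerate a drift of at most one
	// extra replica for ten minutes instead of requiring an exact count.
	rc.WaitForReplicas(3, 15*time.Minute)
	rc.EnsureDesiredReplicasInRange(3, 4, 10*time.Minute, "example-hpa")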

View File

@ -20,19 +20,21 @@ import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-api-machinery] ConfigMap", func() {
var _ = Describe("[sig-node] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-in-env-field
Description: Make sure config map value can be used as an environment
variable in the container (on container.env field)
Release : v1.9
Testname: ConfigMap, from environment field
Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment.
*/
framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@ -51,7 +53,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@ -78,9 +80,9 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
})
/*
Testname: configmap-envfrom-field
Description: Make sure config map value can be used as an source for
environment variables in the container (on container.envFrom field)
Release: v1.9
Testname: ConfigMap, from environment variables
Description: Create a Pod with an environment source from a ConfigMap. All ConfigMap values MUST be available as environment variables in the container.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
@ -99,7 +101,7 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
@ -121,6 +123,11 @@ var _ = Describe("[sig-api-machinery] ConfigMap", func() {
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})
It("should fail to create configMap in volume due to empty configmap key", func() {
configMap, err := newConfigMapWithEmptyKey(f)
Expect(err).To(HaveOccurred(), "created configMap %q with empty key in namespace %q", configMap.Name, f.Namespace.Name)
})
})
func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
@ -136,3 +143,19 @@ func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
},
}
}
func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
name := "configmap-test-emptyKey-" + string(uuid.NewUUID())
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"": "value-1",
},
}
By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
}
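For context, a hedged sketch of the envFrom block that produces the prefixed "p_data_1" style variables asserted above; the surrounding pod spec is elided from the hunk, so the field values here are illustrative:
	// name is the ConfigMap created by newEnvFromConfigMap in the test body.
	envFrom := []v1.EnvFromSource{
		// plain import: data_1=value-1, data_2=value-2, data_3=value-3
		{ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}}},
		// prefixed import: p_data_1=value-1, p_data_2=value-2, p_data_3=value-3
		{Prefix: "p_", ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}}},
	}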

View File

@ -27,24 +27,25 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = Describe("[sig-storage] ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
/*
Testname: configmap-nomap-simple
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with no other settings.
Release : v1.9
Testname: ConfigMap Volume, without mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
/*
Testname: configmap-nomap-default-mode
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod with defaultMode set
Release : v1.9
Testname: ConfigMap Volume, without mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of 0x400
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@ -57,9 +58,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-nomap-user
Description: Make sure config map without mappings works by mounting it
to a volume with a custom path (mapping) on the pod as non-root.
Release : v1.9
Testname: ConfigMap Volume, without mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
@ -70,19 +71,18 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-simple-mapped
Description: Make sure config map works by mounting it to a volume with
a custom path (mapping) on the pod with no other settings and make sure
the pod actually consumes it.
Release : v1.9
Testname: ConfigMap Volume, with mapping
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Testname: configmap-with-item-mode-mapped
Description: Make sure config map works with an item mode (e.g. 0400)
for the config map item.
Release : v1.9
Testname: ConfigMap Volume, with mapping, volume mode set
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of 0x400
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
@ -90,8 +90,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-simple-user-mapped
Description: Make sure config map works when it is mounted as non-root.
Release : v1.9
Testname: ConfigMap Volume, with mapping, non-root user
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
@ -102,9 +103,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-update-test
Description: Make sure update operation is working on config map and
the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, update
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. When the ConfigMap is updated, the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@ -151,7 +152,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -184,7 +185,12 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
It("binary data should be reflected in volume [NodeConformance]", func() {
/*
Release: v1.12
Testname: ConfigMap Volume, text data, binary data
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
*/
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
@ -233,7 +239,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -245,7 +251,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: containerName2,
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"hexdump", "-C", "/etc/configmap-volume/dump.bin"},
VolumeMounts: []v1.VolumeMount{
{
@ -276,9 +282,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-CUD-test
Description: Make sure Create, Update, Delete operations are all working
on config map and the result is observed on volumes mounted in containers.
Release : v1.9
Testname: ConfigMap Volume, create, update and delete
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to a custom path in the Pod. When the config map is updated, the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also, when the item (file) is deleted from the map, that MUST result in an error reading that item (file).
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@ -379,7 +385,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -391,7 +397,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@ -403,7 +409,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -459,9 +465,9 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
/*
Testname: configmap-multiple-volumes
Description: Make sure config map works when it mounted as two different
volumes on the same node.
Release : v1.9
Testname: ConfigMap Volume, multiple volume maps
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
@ -509,7 +515,7 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -534,6 +540,26 @@ var _ = Describe("[sig-storage] ConfigMap", func() {
})
})
//The pod remains pending during volume creation until the configMap objects are available
//or until mounting the configMap volume times out. There is no configMap object defined for the pod, so it should return a timeout error unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
//A ConfigMap object is defined for the pod. If a key is specified which is not present in the ConfigMap,
// the volume setup will error during pod creation unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
})
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
@ -589,7 +615,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
@ -675,7 +701,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
@ -718,3 +744,115 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"
//creating a pod without configMap object created, by mentioning the configMap volume source's local reference name
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Optional: &falseValue,
},
},
},
},
Containers: []v1.Container{
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}
func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"
configMap := newConfigMap(f, createName)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
//creating a pod with configMap object, but with different key which is not present in configMap object.
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Items: []v1.KeyToPath{
{
Key: "data-4",
Path: "path/to/data-4",
},
},
Optional: &falseValue,
},
},
},
},
Containers: []v1.Container{
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}

134 vendor/k8s.io/kubernetes/test/e2e/common/container.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
ContainerStatusRetryTimeout = time.Minute * 5
ContainerStatusPollInterval = time.Second * 1
)
// ConformanceContainer wraps a single container that runs in its own pod (one pod, one container).
type ConformanceContainer struct {
Container v1.Container
RestartPolicy v1.RestartPolicy
Volumes []v1.Volume
ImagePullSecrets []string
PodClient *framework.PodClient
podName string
PodSecurityContext *v1.PodSecurityContext
}
func (cc *ConformanceContainer) Create() {
cc.podName = cc.Container.Name + string(uuid.NewUUID())
imagePullSecrets := []v1.LocalObjectReference{}
for _, s := range cc.ImagePullSecrets {
imagePullSecrets = append(imagePullSecrets, v1.LocalObjectReference{Name: s})
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: cc.podName,
},
Spec: v1.PodSpec{
RestartPolicy: cc.RestartPolicy,
Containers: []v1.Container{
cc.Container,
},
SecurityContext: cc.PodSecurityContext,
Volumes: cc.Volumes,
ImagePullSecrets: imagePullSecrets,
},
}
cc.PodClient.Create(pod)
}
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
}
func (cc *ConformanceContainer) IsReady() (bool, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return false, err
}
return podutil.IsPodReady(pod), nil
}
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return v1.PodUnknown, err
}
return pod.Status.Phase, nil
}
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err != nil {
return v1.ContainerStatus{}, err
}
statuses := pod.Status.ContainerStatuses
if len(statuses) != 1 || statuses[0].Name != cc.Container.Name {
return v1.ContainerStatus{}, fmt.Errorf("unexpected container statuses %v", statuses)
}
return statuses[0], nil
}
func (cc *ConformanceContainer) Present() (bool, error) {
_, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
if err == nil {
return true, nil
}
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
type ContainerState string
const (
ContainerStateWaiting ContainerState = "Waiting"
ContainerStateRunning ContainerState = "Running"
ContainerStateTerminated ContainerState = "Terminated"
ContainerStateUnknown ContainerState = "Unknown"
)
func GetContainerState(state v1.ContainerState) ContainerState {
if state.Waiting != nil {
return ContainerStateWaiting
}
if state.Running != nil {
return ContainerStateRunning
}
if state.Terminated != nil {
return ContainerStateTerminated
}
return ContainerStateUnknown
}
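A hypothetical usage sketch of the helper type above (none of this appears in the file; the framework, Gomega's dot-imported Eventually/Equal and the image helper are assumed to be available in the test package):
	cc := ConformanceContainer{
		PodClient: f.PodClient(),
		Container: v1.Container{
			Name:    "cc-example",
			Image:   imageutils.GetE2EImage(imageutils.BusyBox),
			Command: []string{"sh", "-c", "sleep 600"},
		},
		RestartPolicy: v1.RestartPolicyNever,
	}
	cc.Create()
	defer cc.Delete() // best-effort cleanup
	// Poll until the pod's single container reports the Running phase, then inspect it.
	Eventually(cc.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(v1.PodRunning))
	status, err := cc.GetStatus()
	framework.ExpectNoError(err)
	framework.Logf("container state: %v", GetContainerState(status.State))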

View File

@ -50,9 +50,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-readiness-probe-initial-delay
Description: Make sure that pod with readiness probe should not be
ready before initial delay and never restart.
Release : v1.9
Testname: Pod readiness probe, with initial delay
Description: Create a Pod that is configured with an initial delay set on the readiness probe. Compare the Pod start time to the initial delay. The Pod MUST be ready only after the specified initial delay.
*/
framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
@ -82,9 +82,10 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-readiness-probe-failure
Description: Make sure that pod with readiness probe that fails should
never be ready and never restart.
Release : v1.9
Testname: Pod readiness probe, failure
Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created,
then the Pod MUST never be ready, never be running and restart count MUST be zero.
*/
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
@ -107,9 +108,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-cat-liveness-probe-restarted
Description: Make sure the pod is restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, restart
Description: Create a Pod with a liveness probe that uses an ExecAction handler to cat the /tmp/health file. The Container deletes the file /tmp/health after 10 seconds, triggering the liveness probe to fail. The Pod MUST then be killed and restarted, incrementing the restart count to 1.
*/
framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -121,7 +122,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -139,9 +140,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-cat-liveness-probe-not-restarted
Description: Make sure the pod is not restarted with a cat /tmp/health
liveness probe.
Release : v1.9
Testname: Pod liveness probe, using local file, no restart
Description: A Pod is created with a liveness probe that uses an exec command to cat the /tmp/health file. The liveness probe MUST not fail to check health, and the restart count should remain 0.
*/
framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -153,7 +154,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -171,9 +172,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-http-liveness-probe-restarted
Description: Make sure when http liveness probe fails, the pod should
be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, restart
Description: A Pod is created with a liveness probe on the http endpoint /healthz. The http handler on /healthz will return an http error 10 seconds after the Pod is started. This MUST result in a liveness check failure. The Pod MUST then be killed and restarted, incrementing the restart count to 1.
*/
framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -205,9 +206,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
// Slow by design (5 min)
/*
Testname: pods-restart-count
Description: Make sure when a pod gets restarted, its start count
should increase.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, multiple restarts (slow)
Description: A Pod is created with a liveness probe on the http endpoint /healthz. The http handler on /healthz will return an http error 10 seconds after the Pod is started. This MUST result in a liveness check failure. The Pod MUST then be killed and restarted, incrementing the restart count to 1. The liveness probe must fail again after the restart once the http handler for the /healthz endpoint on the Pod returns an http error 10 seconds after the start. Restart counts MUST increment every time the health check fails, measured up to 5 restarts.
*/
framework.ConformanceIt("should have monotonically increasing restart count [Slow][NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -238,9 +239,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-http-liveness-probe-not-restarted
Description: Make sure when http liveness probe succeeds, the pod
should not be restarted.
Release : v1.9
Testname: Pod liveness probe, using http endpoint, failure
Description: A Pod is created with a liveness probe on the http endpoint /. The liveness probe on this endpoint will not fail. When the liveness probe does not fail, the restart count MUST remain zero.
*/
framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func() {
runLivenessTest(f, &v1.Pod{
@ -252,7 +253,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
@ -272,9 +273,9 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
/*
Testname: pods-docker-liveness-probe-timeout
Description: Make sure that the pod is restarted with a docker exec
liveness probe with timeout.
Release : v1.9
Testname: Pod liveness probe, docker exec, restart
Description: A Pod is created with a liveness probe that uses an Exec action. If the liveness probe call does not return within the specified timeout, the liveness probe MUST restart the Pod.
*/
It("should be restarted with a docker exec liveness probe with timeout ", func() {
// TODO: enable this test once the default exec handler supports timeout.
@ -288,7 +289,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
Containers: []v1.Container{
{
Name: "liveness",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{

View File

@ -28,10 +28,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
f := framework.NewDefaultFramework("containers")
/*
Testname: container-without-command-args
Description: When a Pod is created neither 'command' nor 'args' are
provided for a Container, ensure that the docker image's default
command and args are used.
Release : v1.9
Testname: Docker containers, without command and arguments
Description: Default command and arguments from the docker image entrypoint MUST be used when the Pod does not specify the container command.
*/
framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
@ -40,10 +39,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
})
/*
Testname: container-with-args
Description: When a Pod is created and 'args' are provided for a
Container, ensure that they take precedent to the docker image's
default arguments, but that the default command is used.
Release : v1.9
Testname: Docker containers, with arguments
Description: Default command from the docker image entrypoint MUST be used when the Pod does not specify the container command, but the arguments from the Pod spec MUST override the image's default arguments when specified.
*/
framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
pod := entrypointTestPod()
@ -57,10 +55,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
// Note: when you override the entrypoint, the image's arguments (docker cmd)
// are ignored.
/*
Testname: container-with-command
Description: When a Pod is created and 'command' is provided for a
Container, ensure that it takes precedent to the docker image's default
command.
Release : v1.9
Testname: Docker containers, with command
Description: Default command from the docker image entrypoint MUST NOT be used when the Pod specifies the container command. The command from the Pod spec MUST override the command in the image.
*/
framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
pod := entrypointTestPod()
@ -72,10 +69,9 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
})
/*
Testname: container-with-command-args
Description: When a Pod is created and 'command' and 'args' are
provided for a Container, ensure that they take precedent to the docker
image's default command and arguments.
Release : v1.9
Testname: Docker containers, with command and arguments
Description: Default command and arguments from the docker image entrypoint MUST NOT be used when the Pod specifies the container command and arguments. The command and arguments from the Pod spec MUST override the command and arguments in the image.
*/
framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
pod := entrypointTestPod()

View File

@ -23,8 +23,9 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
utilversion "k8s.io/kubernetes/pkg/util/version"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@ -34,13 +35,13 @@ var (
podUIDVersion = utilversion.MustParseSemantic("v1.8.0")
)
var _ = Describe("[sig-api-machinery] Downward API", func() {
var _ = Describe("[sig-node] Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
/*
Testname: downwardapi-env-name-namespace-podip
Description: Ensure that downward API can provide pod's name, namespace
and IP address as environment variables.
Release : v1.9
Testname: DownwardAPI, environment for name, namespace and ip
Description: Downward API MUST expose Pod and Container fields as environment variables. The Pod name, namespace and IP, specified as environment variables in the Pod Spec, MUST be visible at runtime in the container.
*/
framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@ -84,9 +85,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-host-ip
Description: Ensure that downward API can provide an IP address for
host node as an environment variable.
Release : v1.9
Testname: DownwardAPI, environment for host ip
Description: Downward API MUST expose Pod and Container fields as environment variables. The host IP, specified as an environment variable in the Pod Spec, MUST be visible at runtime in the container.
*/
framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(hostIPVersion, f.ClientSet.Discovery())
@ -111,9 +112,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-limits-requests
Description: Ensure that downward API can provide CPU/memory limit
and CPU/memory request as environment variables.
Release : v1.9
Testname: DownwardAPI, environment for CPU and memory limits and requests
Description: Downward API MUST expose CPU and memory requests and limits, set through environment variables, at runtime in the container.
*/
framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@ -162,10 +163,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-default-allocatable
Description: Ensure that downward API can provide default node
allocatable values for CPU and memory as environment variables if CPU
and memory limits are not specified for a container.
Release : v1.9
Testname: DownwardAPI, environment for default CPU and memory limits and requests
Description: When CPU and memory limits are not specified for a container, the Downward API MUST expose the default node allocatable values as CPU and memory limits through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
@ -200,7 +200,7 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: env,
},
@ -213,9 +213,9 @@ var _ = Describe("[sig-api-machinery] Downward API", func() {
})
/*
Testname: downwardapi-env-pod-uid
Description: Ensure that downward API can provide pod UID as an
environment variable.
Release : v1.9
Testname: DownwardAPI, environment for Pod UID
Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container.
*/
framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func() {
framework.SkipUnlessServerVersionGTE(podUIDVersion, f.ClientSet.Discovery())
@ -300,7 +300,7 @@ var _ = framework.KubeDescribe("Downward API [Serial] [Disruptive] [NodeFeature:
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: env,
},
@ -325,7 +325,7 @@ func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, ex
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@ -357,7 +357,7 @@ func testDownwardAPIForEphemeralStorage(f *framework.Framework, podName string,
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{

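The environment variables these tests inject are defined with fieldRef/resourceFieldRef sources; the sketch below is not part of this diff and the variable names are illustrative, but it shows the two EnvVarSource flavours the descriptions above refer to.
package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// downwardAPIEnv sketches the two EnvVarSource flavours exercised above:
// ObjectFieldSelector for Pod metadata/status fields and
// ResourceFieldSelector for container CPU/memory requests and limits.
func downwardAPIEnv() []v1.EnvVar {
	return []v1.EnvVar{
		{
			Name: "POD_NAME",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
			},
		},
		{
			Name: "HOST_IP",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{FieldPath: "status.hostIP"},
			},
		},
		{
			Name: "CPU_LIMIT",
			ValueFrom: &v1.EnvVarSource{
				ResourceFieldRef: &v1.ResourceFieldSelector{
					Resource: "limits.cpu",
					Divisor:  resource.MustParse("1m"), // report in millicores
				},
			},
		},
	}
}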
View File

@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -32,7 +33,7 @@ import (
var _ = Describe("[sig-storage] Downward API volume", func() {
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
const podLogTimeout = 3 * time.Minute
f := framework.NewDefaultFramework("downward-api")
var podClient *framework.PodClient
BeforeEach(func() {
@ -40,9 +41,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-podname
Description: Ensure that downward API can provide pod's name through
DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, pod name
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -54,9 +55,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-set-default-mode
Description: Ensure that downward API can set default file permission
mode for DownwardAPIVolumeFiles if no mode is specified.
Release : v1.9
Testname: DownwardAPI volume, volume mode 0400
Description: A Pod is configured with DownwardAPIVolumeSource with the volume source default mode set to -r-------- (0400) and DownwardAPIVolumeFiles contains an item for the Pod name. The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -69,9 +70,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-set-mode
Description: Ensure that downward API can set file permission mode for
DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, file mode 0400
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the Pod name with the file mode set to -r-------- (0400). The container runtime MUST be able to access the Pod name from the specified path on the mounted volume.
*/
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -113,9 +114,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-update-label
Description: Ensure that downward API updates labels in
DownwardAPIVolumeFiles when pod's labels get modified.
Release : v1.9
Testname: DownwardAPI volume, update label
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod labels. The container runtime MUST be able to access the Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
labels := map[string]string{}
@ -145,9 +146,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-update-annotation
Description: Ensure that downward API updates annotations in
DownwardAPIVolumeFiles when pod's annotations get modified.
Release : v1.9
Testname: DownwardAPI volume, update annotations
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a list of items, one for each of the Pod annotations. The container runtime MUST be able to access the Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
annotations := map[string]string{}
@ -179,9 +180,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-cpu-limit
Description: Ensure that downward API can provide container's CPU limit
through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, CPU limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limit. The container runtime MUST be able to access the CPU limit from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -193,9 +194,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-memory-limit
Description: Ensure that downward API can provide container's memory
limit through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, memory limits
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limit. The container runtime MUST be able to access the memory limit from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -207,9 +208,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-cpu-request
Description: Ensure that downward API can provide container's CPU
request through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, CPU request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU request. The container runtime MUST be able to access the CPU request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -221,9 +222,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-memory-request
Description: Ensure that downward API can provide container's memory
request through DownwardAPIVolumeFiles.
Release : v1.9
Testname: DownwardAPI volume, memory request
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory request. The container runtime MUST be able to access the memory request from the specified path on the mounted volume.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -235,10 +236,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-default-cpu
Description: Ensure that downward API can provide default node
allocatable value for CPU through DownwardAPIVolumeFiles if CPU
limit is not specified for a container.
Release : v1.9
Testname: DownwardAPI volume, CPU limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the CPU limit. The CPU limit is not specified for the container. The container runtime MUST be able to access the CPU limit from the specified path on the mounted volume and the value MUST be the default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -248,10 +248,9 @@ var _ = Describe("[sig-storage] Downward API volume", func() {
})
/*
Testname: downwardapi-volume-default-memory
Description: Ensure that downward API can provide default node
allocatable value for memory through DownwardAPIVolumeFiles if memory
limit is not specified for a container.
Release : v1.9
Testname: DownwardAPI volume, memory limit, default node allocatable
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains an item for the memory limit. The memory limit is not specified for the container. The container runtime MUST be able to access the memory limit from the specified path on the mounted volume and the value MUST be the default node allocatable.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
@ -268,7 +267,7 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@ -294,7 +293,7 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@ -325,7 +324,7 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@ -353,7 +352,7 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container
return []v1.Container{
{
Name: name,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
@ -372,7 +371,7 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{

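For context, the DownwardAPIVolumeSource and DownwardAPIVolumeFiles items these descriptions refer to look roughly like the sketch below; it is not part of this diff, and the paths and mode are illustrative.
package sketch

import v1 "k8s.io/api/core/v1"

// downwardAPIVolume sketches a volume that projects the Pod name and labels
// into files, with a 0400 default mode as in the "volume mode 0400" test.
func downwardAPIVolume() v1.Volume {
	mode := int32(0400)
	return v1.Volume{
		Name: "podinfo",
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{
				DefaultMode: &mode,
				Items: []v1.DownwardAPIVolumeFile{
					{
						Path:     "podname",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
					},
					{
						Path:     "labels",
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.labels"},
					},
				},
			},
		},
	}
}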
View File

@ -67,139 +67,126 @@ var _ = Describe("[sig-storage] EmptyDir volumes", func() {
})
/*
Testname: volume-emptydir-mode-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure the volume has 0777 unix file permissions and tmpfs
mount type.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode default
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
*/
framework.ConformanceIt("volume on tmpfs should have the correct mode [NodeConformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0644
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0644,tmpfs) [NodeConformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0666
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0666,tmpfs) [NodeConformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-root-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a root owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0777
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0777,tmpfs) [NodeConformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0644-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0644 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0644, non-root user
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0644,tmpfs) [NodeConformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0666-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0666 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0666, non-root user
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0666,tmpfs) [NodeConformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-user-0777-tmpfs
Description: For a Pod created with an 'emptyDir' Volume with 'medium'
of 'Memory', ensure a user owned file with 0777 unix file permissions
is created correctly, has tmpfs mount type, and enforces the permissions.
Release : v1.9
Testname: EmptyDir, medium memory, volume mode 0777, non-root user
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0777,tmpfs) [NodeConformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
/*
Testname: volume-emptydir-mode
Description: For a Pod created with an 'emptyDir' Volume, ensure the
volume has 0777 unix file permissions.
Release : v1.9
Testname: EmptyDir, medium default, volume mode default
Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
*/
framework.ConformanceIt("volume on default medium should have the correct mode [NodeConformance]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0644 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0644
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0644,default) [NodeConformance]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0666 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0666
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0666,default) [NodeConformance]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-root-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
root owned file with 0777 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0777
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (root,0777,default) [NodeConformance]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0644
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0644 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0644, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0644,default) [NodeConformance]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0666
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0666 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0666, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0666,default) [NodeConformance]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
/*
Testname: volume-emptydir-user-0777
Description: For a Pod created with an 'emptyDir' Volume, ensure a
user owned file with 0777 unix file permissions is created and enforced
correctly.
Release : v1.9
Testname: EmptyDir, medium default, volume mode 0777, non-root user
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
*/
framework.ConformanceIt("should support (non-root,0777,default) [NodeConformance]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
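A minimal sketch of the emptyDir volume these tests mount is shown below; it is not part of this diff. The 0644/0666/0777 files in the scenarios above are created at runtime by the mount-tester container (via flags such as --new_file_0644), not by the volume definition, and omitting Medium gives the default-medium variants.
package sketch

import v1 "k8s.io/api/core/v1"

// memoryBackedEmptyDir sketches the tmpfs-backed emptyDir volume used by the
// "medium memory" tests; the default-medium tests simply leave Medium unset.
func memoryBackedEmptyDir() v1.Volume {
	return v1.Volume{
		Name: "test-volume",
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{
				Medium: v1.StorageMediumMemory,
			},
		},
	}
}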

View File

@ -21,6 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -33,9 +34,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
f := framework.NewDefaultFramework("var-expansion")
/*
Testname: var-expansion-env
Description: Make sure environment variables can be set using an
expansion of previously defined environment variables
Release : v1.9
Testname: Environment variables, expansion
Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@ -48,7 +49,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@ -78,9 +79,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
})
/*
Testname: var-expansion-command
Description: Make sure a container's commands can be set using an
expansion of environment variables.
Release : v1.9
Testname: Environment variables, command expansion
Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@ -93,7 +94,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
@ -113,9 +114,9 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
})
/*
Testname: var-expansion-arg
Description: Make sure a container's args can be set using an
expansion of environment variables.
Release : v1.9
Testname: Environment variables, command argument expansion
Description: Create a Pod with environment variables and container command arguments using them. Container command arguments using the defined environment variables MUST expand to proper values.
*/
framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
@ -164,7 +165,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "test -d /testcontainer/" + podName + ";echo $?"},
Env: []v1.EnvVar{
{
@ -225,7 +226,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Env: []v1.EnvVar{
{
Name: "POD_NAME",
@ -274,7 +275,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
Containers: []v1.Container{
{
Name: "dapi-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Env: []v1.EnvVar{
{
Name: "POD_NAME",

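The $(VAR) expansion these tests rely on is performed by Kubernetes before the container command runs, not by the shell; a minimal sketch (not part of this diff, values illustrative):
package sketch

import v1 "k8s.io/api/core/v1"

// expansionContainer sketches the $(VAR) expansion exercised above: the
// kubelet substitutes $(TEST_VAR) from the container's Env before the
// command is executed, so the shell never sees the literal string.
func expansionContainer() v1.Container {
	return v1.Container{
		Name:  "expansion-test",
		Image: "busybox", // illustrative image
		Env: []v1.EnvVar{
			{Name: "TEST_VAR", Value: "test-value"},
		},
		Command: []string{"sh", "-c"},
		// $(TEST_VAR) is replaced with "test-value" by Kubernetes, not by sh.
		Args: []string{"echo \"$(TEST_VAR)\""},
	}
}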
View File

@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
@ -40,10 +41,9 @@ var _ = Describe("[sig-storage] HostPath", func() {
})
/*
Testname: volume-hostpath-mode
Description: For a Pod created with a 'HostPath' Volume, ensure the
volume is a directory with 0777 unix file permissions and that is has
the sticky bit (mode flag t) set.
Release : v1.9
Testname: Host path, volume mode default
Description: Create a Pod with a host volume mounted. The mounted volume MUST be a directory with permissions mode -rwxrwxrwx and MUST have the sticky bit (mode flag t) set.
*/
framework.ConformanceIt("should give a volume the correct mode [NodeConformance]", func() {
source := &v1.HostPathVolumeSource{
@ -116,91 +116,6 @@ var _ = Describe("[sig-storage] HostPath", func() {
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
It("should support existing directory subPath", func() {
framework.SkipUnlessSSHKeyPresent()
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
pod.Spec.NodeName = nodeList.Items[0].Name
// Create the subPath directory on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("mkdir -p %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
framework.Failf("mkdir returned non-zero")
}
// Write the file in the subPath from container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
fmt.Sprintf("--file_mode=%v", filePathInWriter),
}
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
// TODO consolidate common code of this test and above
It("should support existing single file subPath", func() {
framework.SkipUnlessSSHKeyPresent()
subPath := "sub-path-test-file"
retryDuration := 180
filePathInReader := path.Join(volumePath, subPath)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
pod.Spec.NodeName = nodeList.Items[0].Name
// Create the subPath file on the host
existing := path.Join(source.Path, subPath)
result, err := framework.SSH(fmt.Sprintf("echo \"mount-tester new file\" > %s", existing), framework.GetNodeExternalIP(&nodeList.Items[0]), framework.TestContext.Provider)
framework.LogSSHResult(result)
framework.ExpectNoError(err)
if result.Code != 0 {
framework.Failf("echo returned non-zero")
}
// Mount the file to the subPath in container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
})
//These constants are borrowed from the other test.
@ -236,7 +151,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
Containers: []v1.Container{
{
Name: containerName1,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
@ -249,7 +164,7 @@ func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
},
{
Name: containerName2,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,

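For reference, the hostPath volume built by testPodWithHostVol, together with the SubPath mount exercised by the subPath tests removed from this vendored copy, looks roughly like the sketch below; it is not part of this diff and the paths are illustrative.
package sketch

import v1 "k8s.io/api/core/v1"

// hostPathMount sketches a hostPath volume plus a VolumeMount that uses
// SubPath, as in the removed "existing directory subPath" test.
func hostPathMount() (v1.Volume, v1.VolumeMount) {
	vol := v1.Volume{
		Name: "test-volume",
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
		},
	}
	mount := v1.VolumeMount{
		Name:      "test-volume",
		MountPath: "/test-volume",
		SubPath:   "sub-path", // mounts /tmp/sub-path at /test-volume
	}
	return vol, mount
}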
View File

@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"strconv"
"time"
@ -26,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/watch"
watchtools "k8s.io/client-go/tools/watch"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
@ -42,7 +44,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
podClient = f.PodClient()
})
It("should invoke init containers on a RestartNever pod", func() {
/*
Release: v1.12
Testname: init-container-starts-app-restartnever-pod
Description: Ensure that all InitContainers are started,
all containers in the pod terminate voluntarily with exit status 0,
and the system does not restart any of these containers
when the Pod has a restart policy of RestartNever.
*/
framework.ConformanceIt("should invoke init containers on a RestartNever pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -59,19 +69,19 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@ -82,7 +92,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
@ -99,7 +111,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
})
It("should invoke init containers on a RestartAlways pod", func() {
/*
Release: v1.12
Testname: init-container-starts-app-restartalways-pod
Description: Ensure that all InitContainers are started,
all containers in the pod are started,
and at least one container is still running or is in the process of being restarted
when the Pod has a restart policy of RestartAlways.
*/
framework.ConformanceIt("should invoke init containers on a RestartAlways pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -115,12 +135,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@ -131,7 +151,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@ -143,7 +163,9 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(ctx, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
@ -160,7 +182,15 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
})
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
/*
Release: v1.12
Testname: init-container-fails-stops-app-restartalways-pod
Description: Ensure that the app container is not started
when an InitContainer fails to start,
the Pod has been restarted a few times,
and the Pod has a restart policy of RestartAlways.
*/
framework.ConformanceIt("should not start app containers if init containers fail on a RestartAlways pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -177,12 +207,12 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/false"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
},
@ -193,7 +223,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@ -206,8 +236,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(
ctx, wr,
// check for the first container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
@ -268,7 +300,13 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
})
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
/*
Release: v1.12
Testname: init-container-fails-stops-app-restartnever-pod
Description: Ensure that the app container is not started
when at least one InitContainer fails to start and the Pod has a restart policy of RestartNever.
*/
framework.ConformanceIt("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@ -285,24 +323,24 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
InitContainers: []v1.Container{
{
Name: "init1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/false"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/true"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(50*1024*1024, resource.DecimalSI),
},
},
},
@ -316,8 +354,10 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
defer cancel()
event, err := watchtools.UntilWithoutRetry(
ctx, wr,
// check for the second container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {

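The recurring mechanical change in the init container hunks above is the move from watch.Until to the context-based helpers in k8s.io/client-go/tools/watch, the same helpers this diff imports as watchtools. Isolated from the test bodies, the pattern is roughly the sketch below; it is not part of this diff.
package sketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/watch"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForCondition sketches the watch.Until -> watchtools.UntilWithoutRetry
// migration seen above: the timeout now lives in a context instead of being
// passed directly, and the watcher is not re-established on error.
func waitForCondition(w watch.Interface, timeout time.Duration, cond func(watch.Event) (bool, error)) (*watch.Event, error) {
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	return watchtools.UntilWithoutRetry(ctx, w, cond)
}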
227
vendor/k8s.io/kubernetes/test/e2e/common/kubelet.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bytes"
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Kubelet", func() {
f := framework.NewDefaultFramework("kubelet-test")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
Context("when scheduling a busybox command in a pod", func() {
podName := "busybox-scheduling-" + string(uuid.NewUUID())
/*
Release : v1.13
Testname: Kubelet, log output, default
Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs.
*/
framework.ConformanceIt("should print the output to logs [NodeConformance]", func() {
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
// Don't restart the Pod since it is expected to exit
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: framework.BusyBoxImage,
Name: podName,
Command: []string{"sh", "-c", "echo 'Hello World' ; sleep 240"},
},
},
},
})
Eventually(func() string {
sinceTime := metav1.NewTime(time.Now().Add(time.Duration(-1 * time.Hour)))
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{SinceTime: &sinceTime}).Stream()
if err != nil {
return ""
}
defer rc.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("Hello World\n"))
})
})
Context("when scheduling a busybox command that always fails in a pod", func() {
var podName string
BeforeEach(func() {
podName = "bin-false" + string(uuid.NewUUID())
podClient.Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
// Don't restart the Pod since it is expected to exit
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: framework.BusyBoxImage,
Name: podName,
Command: []string{"/bin/false"},
},
},
},
})
})
/*
Release : v1.13
Testname: Kubelet, failed pod, terminated reason
Description: Create a Pod with a terminated state. The Pod MUST have only one container. The container MUST be in a terminated state and MUST have a terminated reason.
*/
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() {
Eventually(func() error {
podData, err := podClient.Get(podName, metav1.GetOptions{})
if err != nil {
return err
}
if len(podData.Status.ContainerStatuses) != 1 {
return fmt.Errorf("expected only one container in the pod %q", podName)
}
contTerminatedState := podData.Status.ContainerStatuses[0].State.Terminated
if contTerminatedState == nil {
return fmt.Errorf("expected state to be terminated. Got pod status: %+v", podData.Status)
}
if contTerminatedState.ExitCode == 0 || contTerminatedState.Reason == "" {
return fmt.Errorf("expected non-zero exitCode and non-empty terminated state reason. Got exitCode: %+v and terminated state reason: %+v", contTerminatedState.ExitCode, contTerminatedState.Reason)
}
return nil
}, time.Minute, time.Second*4).Should(BeNil())
})
/*
Release : v1.13
Testname: Kubelet, failed pod, delete
Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted.
*/
framework.ConformanceIt("should be possible to delete [NodeConformance]", func() {
err := podClient.Delete(podName, &metav1.DeleteOptions{})
Expect(err).To(BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})
Context("when scheduling a busybox Pod with hostAliases", func() {
podName := "busybox-host-aliases" + string(uuid.NewUUID())
/*
Release : v1.13
Testname: Kubelet, hostAliases
Description: Create a Pod with hostAliases and a container whose command outputs the /etc/hosts entries. The Pod's logs MUST contain entries for the specified hostAliases, matching the output of /etc/hosts.
*/
framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func() {
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
// Don't restart the Pod since it is expected to exit
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: framework.BusyBoxImage,
Name: podName,
Command: []string{"/bin/sh", "-c", "cat /etc/hosts; sleep 6000"},
},
},
HostAliases: []v1.HostAlias{
{
IP: "123.45.67.89",
Hostnames: []string{"foo", "bar"},
},
},
},
})
Eventually(func() error {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
defer rc.Close()
if err != nil {
return err
}
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
hostsFileContent := buf.String()
if !strings.Contains(hostsFileContent, "123.45.67.89\tfoo\tbar") {
return fmt.Errorf("expected hosts file to contain entries from HostAliases. Got:\n%+v", hostsFileContent)
}
return nil
}, time.Minute, time.Second*4).Should(BeNil())
})
})
Context("when scheduling a read only busybox container", func() {
podName := "busybox-readonly-fs" + string(uuid.NewUUID())
/*
Release : v1.13
Testname: Kubelet, pod with read only root file system
Description: Create a Pod with a security context that sets ReadOnlyRootFilesystem to true. The Pod then tries to write to /file on the root filesystem; the write operation MUST fail as expected.
*/
framework.ConformanceIt("should not write to root filesystem [NodeConformance]", func() {
isReadOnly := true
podClient.CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
// Don't restart the Pod since it is expected to exit
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: framework.BusyBoxImage,
Name: podName,
Command: []string{"/bin/sh", "-c", "echo test > /file; sleep 240"},
SecurityContext: &v1.SecurityContext{
ReadOnlyRootFilesystem: &isReadOnly,
},
},
},
},
})
Eventually(func() string {
rc, err := podClient.GetLogs(podName, &v1.PodLogOptions{}).Stream()
if err != nil {
return ""
}
defer rc.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(rc)
return buf.String()
}, time.Minute, time.Second*4).Should(Equal("/bin/sh: can't create /file: Read-only file system\n"))
})
})
})

View File

@ -20,10 +20,10 @@ import (
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -51,9 +51,12 @@ var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
}
/*
Testname: kubelet-managed-etc-hosts
Description: Make sure Kubelet correctly manages /etc/hosts and mounts
it into the container.
Release : v1.9
Testname: Kubelet, managed etc hosts
Description: Create a Pod with hostNetwork set to false, where one of the containers mounts the /etc/hosts file from the host. Create a second Pod with hostNetwork set to true.
1. The Pod with hostNetwork=false MUST have the /etc/hosts files of its containers managed by the Kubelet.
2. In the Pod with hostNetwork=false where the container mounts /etc/hosts from the host, that /etc/hosts file MUST NOT be managed by the Kubelet.
3. In the Pod with hostNetwork=true, the /etc/hosts file MUST NOT be managed by the Kubelet.
*/
framework.ConformanceIt("should test kubelet managed /etc/hosts file [NodeConformance]", func() {
By("Setting up the test")
@ -123,7 +126,7 @@ func assertManagedStatus(
}
}
glog.Warningf(
klog.Warningf(
"For pod: %s, name: %s, expected %t, (/etc/hosts was %q), (/etc/hosts-original was %q), retryCount: %d",
podName, name, expectedIsManaged, etcHostsContent, etcHostsOriginalContent, retryCount)
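Case 2 of the description above, a container that bind-mounts the node's /etc/hosts over its own, corresponds to a volume/mount pair roughly like the sketch below; it is not part of this diff and the names are illustrative.
package sketch

import v1 "k8s.io/api/core/v1"

// hostEtcHosts sketches case 2 above: the container mounts the node's
// /etc/hosts over its own, so the kubelet must leave that file unmanaged.
func hostEtcHosts() (v1.Volume, v1.VolumeMount) {
	vol := v1.Volume{
		Name: "host-etc-hosts",
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{Path: "/etc/hosts"},
		},
	}
	mount := v1.VolumeMount{
		Name:      "host-etc-hosts",
		MountPath: "/etc/hosts",
	}
	return vol, mount
}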

View File

@ -0,0 +1,173 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
f := framework.NewDefaultFramework("container-lifecycle-hook")
var podClient *framework.PodClient
const (
podCheckInterval = 1 * time.Second
postStartWaitTimeout = 2 * time.Minute
preStopWaitTimeout = 30 * time.Second
)
Context("when create a pod with lifecycle hook", func() {
var targetIP string
podHandleHookRequest := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-handle-http-request",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pod-handle-http-request",
Image: imageutils.GetE2EImage(imageutils.Netexec),
Ports: []v1.ContainerPort{
{
ContainerPort: 8080,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
}
BeforeEach(func() {
podClient = f.PodClient()
By("create the container to handle the HTTPGet hook request.")
newPod := podClient.CreateSync(podHandleHookRequest)
targetIP = newPod.Status.PodIP
})
testPodWithHook := func(podWithHook *v1.Pod) {
By("create the pod with lifecycle hook")
podClient.CreateSync(podWithHook)
if podWithHook.Spec.Containers[0].Lifecycle.PostStart != nil {
By("check poststart hook")
Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
`GET /echo\?msg=poststart`)
}, postStartWaitTimeout, podCheckInterval).Should(BeNil())
}
By("delete the pod with lifecycle hook")
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
By("check prestop hook")
Eventually(func() error {
return podClient.MatchContainerOutput(podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[0].Name,
`GET /echo\?msg=prestop`)
}, preStopWaitTimeout, podCheckInterval).Should(BeNil())
}
}
/*
Release : v1.9
Testname: Pod Lifecycle, post start exec hook
Description: When a post-start handler is specified in the container lifecycle using an Exec action, the handler MUST be invoked after the start of the container. A server pod is created that will serve HTTP requests; create a second pod with a container lifecycle specifying a post-start hook that invokes the server pod using an ExecAction, to validate that the post-start hook is executed.
*/
framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=poststart"},
},
},
}
podWithHook := getPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, prestop exec hook
Description: When a pre-stop handler is specified in the container lifecycle using an Exec action, the handler MUST be invoked before the container is terminated. A server pod is created that will serve HTTP requests; create a second pod with a container lifecycle specifying a pre-stop hook that invokes the server pod using an ExecAction, to validate that the pre-stop hook is executed.
*/
framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"sh", "-c", "curl http://" + targetIP + ":8080/echo?msg=prestop"},
},
},
}
podWithHook := getPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Hostexec), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, post start http hook
Description: When a post-start handler is specified in the container lifecycle using an HTTPGet action, the handler MUST be invoked after the start of the container. A server pod is created that will serve HTTP requests; create a second pod with a container lifecycle specifying a post-start hook that invokes the server pod, to validate that the post-start hook is executed.
*/
framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PostStart: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=poststart",
Host: targetIP,
Port: intstr.FromInt(8080),
},
},
}
podWithHook := getPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
testPodWithHook(podWithHook)
})
/*
Release : v1.9
Testname: Pod Lifecycle, prestop http hook
Description: When a pre-stop handler is specified in the container lifecycle using an HTTPGet action, the handler MUST be invoked before the container is terminated. A server pod is created that will serve HTTP requests; create a second pod with a container lifecycle specifying a pre-stop hook that invokes the server pod, to validate that the pre-stop hook is executed.
*/
framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func() {
lifecycle := &v1.Lifecycle{
PreStop: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=prestop",
Host: targetIP,
Port: intstr.FromInt(8080),
},
},
}
podWithHook := getPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageName(), lifecycle)
testPodWithHook(podWithHook)
})
})
})
func getPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Lifecycle: lifecycle,
},
},
},
}
}

View File

@ -31,9 +31,10 @@ var _ = Describe("[sig-network] Networking", func() {
// expect exactly one unique hostname. Each of these endpoints reports
// its own hostname.
/*
Testname: networking-intra-pod-http
Description: Try to hit test endpoints from a test container and make
sure each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod http
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end, exposing port 8080 for TCP and port 8081 for UDP. The netserver service proxies are created on the specified number of nodes.
The kubectl exec on the webserver container MUST reach the HTTP port on each of the service proxy endpoints in the cluster and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@ -43,9 +44,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-intra-pod-udp
Description: Try to hit test endpoints from a test container using udp
and make sure each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod udp
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end, exposing port 8080 for TCP and port 8081 for UDP. The netserver service proxies are created on the specified number of nodes.
The kubectl exec on the webserver container MUST reach the UDP port on each of the service proxy endpoints in the cluster and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@ -55,9 +57,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-node-pod-http
Description: Try to hit test endpoints from the pod and make sure each
of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod http, from node
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that will act as a webserver front end, exposing port 8080 for TCP and port 8081 for UDP. The netserver service proxies are created on the specified number of nodes.
The kubectl exec on the webserver container MUST reach the HTTP port on each of the service proxy endpoints in the cluster using an HTTP POST (protocol=tcp) and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST result in reporting unique hostnames.
*/
framework.ConformanceIt("should function for node-pod communication: http [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
@ -67,9 +70,10 @@ var _ = Describe("[sig-network] Networking", func() {
})
/*
Testname: networking-node-pod-udp
Description: Try to hit test endpoints from the pod using udp and make sure
each of them can report a unique hostname.
Release : v1.9
Testname: Networking, intra pod udp, from node
Description: Create a hostexec pod that is capable of running curl and netcat commands. Create a test Pod that acts as a webserver front end exposing port 8080 for tcp and port 8081 for udp. The netserver service proxies are created on a specified number of nodes.
The kubectl exec on the webserver container MUST reach a udp port on each of the service proxy endpoints in the cluster using an http post (protocol=udp) and the request MUST be successful. The container will execute a curl command to reach the service port within the specified max retry limit and MUST report unique hostnames.
*/
framework.ConformanceIt("should function for node-pod communication: udp [NodeConformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
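
All four conformance descriptions above reduce to the same check: query every endpoint behind the test configuration and confirm that each one reports a distinct hostname. The framework helper that does this is not visible in this hunk, so the following is only a minimal, self-contained sketch of the idea; the /hostName path, the endpoint list and the collectHostnames helper are illustrative assumptions, not the framework's actual API.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)

// collectHostnames queries each endpoint's /hostName handler (an assumed path,
// mirroring the netexec-style test images) and records every hostname seen.
func collectHostnames(endpoints []string, maxTries int) map[string]bool {
	seen := map[string]bool{}
	client := &http.Client{Timeout: 5 * time.Second}
	for i := 0; i < maxTries && len(seen) < len(endpoints); i++ {
		for _, ep := range endpoints {
			resp, err := client.Get(fmt.Sprintf("http://%s/hostName", ep))
			if err != nil {
				continue // endpoint not ready yet; retry on the next round
			}
			body, err := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if err == nil {
				seen[string(body)] = true
			}
		}
	}
	return seen
}

func main() {
	// Hypothetical endpoints; in the e2e test these come from the netserver pods.
	hosts := collectHostnames([]string{"10.0.0.1:8080", "10.0.0.2:8080"}, 10)
	fmt.Printf("unique hostnames seen: %d\n", len(hosts))
}

Such a check passes only when the number of unique hostnames equals the number of endpoints.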

166
vendor/k8s.io/kubernetes/test/e2e/common/node_lease.go generated vendored Normal file
View File

@ -0,0 +1,166 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
coordv1beta1 "k8s.io/api/coordination/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
v1node "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("[Feature:NodeLease][NodeAlphaFeature:NodeLease]", func() {
var nodeName string
f := framework.NewDefaultFramework("node-lease-test")
BeforeEach(func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero())
nodeName = nodes.Items[0].ObjectMeta.Name
})
Context("when the NodeLease feature is enabled", func() {
It("the kubelet should create and update a lease in the kube-node-lease namespace", func() {
leaseClient := f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
var (
err error
lease *coordv1beta1.Lease
)
By("check that lease for this Kubelet exists in the kube-node-lease namespace")
Eventually(func() error {
lease, err = leaseClient.Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, 5*time.Minute, 5*time.Second).Should(BeNil())
// check basic expectations for the lease
Expect(expectLease(lease, nodeName)).To(BeNil())
By("check that node lease is updated at least once within the lease duration")
Eventually(func() error {
newLease, err := leaseClient.Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
// check basic expectations for the latest lease
if err := expectLease(newLease, nodeName); err != nil {
return err
}
// check that RenewTime has been updated on the latest lease
newTime := (*newLease.Spec.RenewTime).Time
oldTime := (*lease.Spec.RenewTime).Time
if !newTime.After(oldTime) {
return fmt.Errorf("new lease has time %v, which is not after old lease time %v", newTime, oldTime)
}
return nil
}, time.Duration(*lease.Spec.LeaseDurationSeconds)*time.Second,
time.Duration(*lease.Spec.LeaseDurationSeconds/4)*time.Second).Should(BeNil())
})
It("the kubelet should report node status infrequently", func() {
By("wait until node is ready")
framework.WaitForNodeToBeReady(f.ClientSet, nodeName, 5*time.Minute)
By("wait until there is node lease")
var err error
var lease *coordv1beta1.Lease
Eventually(func() error {
lease, err = f.ClientSet.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
return nil
}, 5*time.Minute, 5*time.Second).Should(BeNil())
// check basic expectations for the lease
Expect(expectLease(lease, nodeName)).To(BeNil())
leaseDuration := time.Duration(*lease.Spec.LeaseDurationSeconds) * time.Second
By("verify NodeStatus report period is longer than lease duration")
// NodeStatus is reported from node to master when there is some change or
// enough time has passed. So for here, keep checking the time diff
// between 2 NodeStatus report, until it is longer than lease duration (
// the same as nodeMonitorGracePeriod).
heartbeatTime := getNextReadyConditionHeartbeatTime(f.ClientSet, nodeName, metav1.Time{})
Eventually(func() error {
nextHeartbeatTime := getNextReadyConditionHeartbeatTime(f.ClientSet, nodeName, heartbeatTime)
if nextHeartbeatTime.Time.After(heartbeatTime.Time.Add(leaseDuration)) {
return nil
}
heartbeatTime = nextHeartbeatTime
return fmt.Errorf("node status report period is shorter than lease duration")
// Enter next round immediately.
}, 5*time.Minute, time.Nanosecond).Should(BeNil())
By("verify node is still in ready status even though node status report is infrequent")
// This check on node status is only meaningful when this e2e test is
// running as cluster e2e test, because node e2e test does not create and
// run controller manager, i.e., no node lifecycle controller.
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
Expect(err).To(BeNil())
_, readyCondition := v1node.GetNodeCondition(&node.Status, corev1.NodeReady)
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
})
})
})
func getNextReadyConditionHeartbeatTime(clientSet clientset.Interface, nodeName string, prevHeartbeatTime metav1.Time) metav1.Time {
var newHeartbeatTime metav1.Time
Eventually(func() error {
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return err
}
_, readyCondition := v1node.GetNodeCondition(&node.Status, corev1.NodeReady)
Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
newHeartbeatTime = readyCondition.LastHeartbeatTime
if prevHeartbeatTime.Before(&newHeartbeatTime) {
return nil
}
return fmt.Errorf("heartbeat has not changed yet")
}, 5*time.Minute, 5*time.Second).Should(BeNil())
return newHeartbeatTime
}
func expectLease(lease *coordv1beta1.Lease, nodeName string) error {
// expect values for HolderIdentity, LeaseDurationSeconds, and RenewTime
if lease.Spec.HolderIdentity == nil {
return fmt.Errorf("Spec.HolderIdentity should not be nil")
}
if lease.Spec.LeaseDurationSeconds == nil {
return fmt.Errorf("Spec.LeaseDurationSeconds should not be nil")
}
if lease.Spec.RenewTime == nil {
return fmt.Errorf("Spec.RenewTime should not be nil")
}
// ensure that the HolderIdentity matches the node name
if *lease.Spec.HolderIdentity != nodeName {
return fmt.Errorf("Spec.HolderIdentity (%v) should match the node name (%v)", *lease.Spec.HolderIdentity, nodeName)
}
return nil
}

View File

@ -29,6 +29,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@ -36,16 +37,20 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var (
buildBackOffDuration = time.Minute
syncLoopFrequency = 10 * time.Second
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
// maxReadyStatusUpdateTolerance specifies the latency that allows kubelet to update pod status.
// When kubelet is under heavy load (tests may be parallelized), the delay may be longer, hence
// causing tests to be flaky.
maxReadyStatusUpdateTolerance = 10 * time.Second
)
// testHostIP tests that a pod gets a host IP
@ -129,9 +134,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-created-pod-assigned-hostip
Description: Make sure when a pod is created that it is assigned a host IP
Address.
Release : v1.9
Testname: Pods, assigned hostip
Description: Create a Pod. Pod status MUST return successfully and contain a valid IP address.
*/
framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
@ -151,9 +156,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-submitted-removed
Description: Makes sure a pod is created, a watch can be setup for the pod,
pod creation was observed, pod is deleted, and pod deletion is observed.
Release : v1.9
Testname: Pods, lifecycle
Description: A Pod is created with a unique label. The Pod MUST be accessible when queried using the label selector upon creation. Add a watch and check that the Pod is running. The Pod is then deleted and the pod deletion timestamp is observed. The watch MUST return the pod deleted event. A query with the original selector for the Pod MUST return an empty list.
*/
framework.ConformanceIt("should be submitted and removed [NodeConformance]", func() {
By("creating the pod")
@ -171,7 +176,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@ -277,8 +282,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-updated-successfully
Description: Make sure it is possible to successfully update a pod's labels.
Release : v1.9
Testname: Pods, update
Description: Create a Pod with a unique label. A query for the Pod with the label as selector MUST be successful. Update the Pod to change the value of the label. A query for the Pod with the new value of the label MUST be successful.
*/
framework.ConformanceIt("should be updated [NodeConformance]", func() {
By("creating the pod")
@ -296,7 +302,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@ -330,10 +336,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-update-active-deadline-seconds
Description: Make sure it is possible to create a pod, update its
activeDeadlineSecondsValue, and then waits for the deadline to pass
and verifies the pod is terminated.
Release : v1.9
Testname: Pods, ActiveDeadlineSeconds
Description: Create a Pod with a unique label. A query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. The Pod MUST terminate once the specified time elapses.
*/
framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func() {
By("creating the pod")
@ -351,7 +356,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "nginx",
Image: imageutils.GetE2EImage(imageutils.NginxSlim),
Image: imageutils.GetE2EImage(imageutils.Nginx),
},
},
},
@ -377,9 +382,9 @@ var _ = framework.KubeDescribe("Pods", func() {
})
/*
Testname: pods-contain-services-environment-variables
Description: Make sure that when a pod is created it contains environment
variables for each active service.
Release : v1.9
Testname: Pods, service environment variables
Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod, listening on port 8765 and targeting port 8080. When a new Pod is created in the cluster, the fooservice environment variables MUST be available from that new Pod. The newly created Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR populated with proper values.
*/
framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func() {
// Make a pod that will be a service.
@ -442,7 +447,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
},
},
@ -467,7 +472,13 @@ var _ = framework.KubeDescribe("Pods", func() {
}, maxRetries, "Container should have service environment variables set")
})
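
The expected variable names in the description above follow the docker-links-style convention Kubernetes uses for service environment variables: the service name is upper-cased, dashes become underscores, and a fixed set of suffixes is appended. A small illustrative sketch; the serviceEnvNames helper is invented here and is not part of the test code.

package main

import (
	"fmt"
	"strings"
)

// serviceEnvNames returns the environment variable names Kubernetes injects for
// a service: the service name is upper-cased and dashes become underscores.
func serviceEnvNames(service string, port int, proto string) []string {
	base := strings.ToUpper(strings.Replace(service, "-", "_", -1))
	p := strings.ToUpper(proto)
	return []string{
		base + "_SERVICE_HOST",
		base + "_SERVICE_PORT",
		base + "_PORT",
		fmt.Sprintf("%s_PORT_%d_%s", base, port, p),
		fmt.Sprintf("%s_PORT_%d_%s_PROTO", base, port, p),
		fmt.Sprintf("%s_PORT_%d_%s_PORT", base, port, p),
		fmt.Sprintf("%s_PORT_%d_%s_ADDR", base, port, p),
	}
}

func main() {
	for _, name := range serviceEnvNames("fooservice", 8765, "tcp") {
		fmt.Println(name) // FOOSERVICE_SERVICE_HOST, FOOSERVICE_PORT_8765_TCP, ...
	}
}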
It("should support remote command execution over websockets [NodeConformance]", func() {
/*
Release : v1.13
Testname: Pods, remote command execution over websocket
Description: A Pod is created. A websocket is created to retrieve the exec command output from this pod.
The message retrieved from the websocket MUST match the expected exec command output.
*/
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
@ -481,7 +492,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
},
},
@ -499,8 +510,8 @@ var _ = framework.KubeDescribe("Pods", func() {
Param("stderr", "1").
Param("stdout", "1").
Param("container", pod.Spec.Containers[0].Name).
Param("command", "cat").
Param("command", "/etc/resolv.conf")
Param("command", "echo").
Param("command", "remote execution test")
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
@ -536,14 +547,20 @@ var _ = framework.KubeDescribe("Pods", func() {
if buf.Len() == 0 {
return fmt.Errorf("Unexpected output from server")
}
if !strings.Contains(buf.String(), "nameserver") {
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
if !strings.Contains(buf.String(), "remote execution test") {
return fmt.Errorf("Expected to find 'remote execution test' in %q", buf.String())
}
return nil
}, time.Minute, 10*time.Second).Should(BeNil())
})
It("should support retrieving logs from the container over websockets [NodeConformance]", func() {
/*
Release : v1.13
Testname: Pods, logs from websockets
Description: A Pod is created. A websocket is created to retrieve the log of a container from this pod.
The message retrieved from the websocket MUST match the container's output.
*/
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
@ -557,7 +574,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: "main",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
@ -600,6 +617,7 @@ var _ = framework.KubeDescribe("Pods", func() {
}
})
// Slow (~7 mins)
It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
podName := "pod-back-off-image"
containerName := "back-off"
@ -612,7 +630,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
@ -623,7 +641,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxSlim)
pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
})
time.Sleep(syncLoopFrequency)
@ -640,7 +658,7 @@ var _ = framework.KubeDescribe("Pods", func() {
}
})
// Slow issue #19027 (20 mins)
// Slow by design (~27 mins) issue #19027
It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
podName := "back-off-cap"
containerName := "back-off-cap"
@ -653,7 +671,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []v1.Container{
{
Name: containerName,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
@ -694,4 +712,64 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
}
})
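The cap exercised by the test above is the kubelet's exponential restart back-off: each crash doubles the delay until it reaches kubelet.MaxContainerBackOff. A minimal sketch of that growth, assuming the conventional 10s initial period and 5m cap for illustration (the exact constants live in the kubelet package):

package main

import (
	"fmt"
	"time"
)

// nextBackOff doubles the previous restart delay and clamps it at the cap,
// which is the behaviour the test above waits (~27 minutes) to observe.
func nextBackOff(prev, limit time.Duration) time.Duration {
	next := prev * 2
	if next > limit {
		return limit
	}
	return next
}

func main() {
	delay, limit := 10*time.Second, 5*time.Minute // assumed defaults for illustration
	for i := 0; i < 8; i++ {
		fmt.Println(delay) // 10s, 20s, 40s, 1m20s, 2m40s, then capped at 5m0s
		delay = nextBackOff(delay, limit)
	}
}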
// TODO(freehan): label the test to be [NodeConformance] after tests are proven to be stable.
It("should support pod readiness gates [NodeFeature:PodReadinessGate]", func() {
podName := "pod-ready"
readinessGate1 := "k8s.io/test-condition1"
readinessGate2 := "k8s.io/test-condition2"
patchStatusFmt := `{"status":{"conditions":[{"type":%q, "status":%q}]}}`
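// For example, patching readinessGate1 to True renders the status patch body as:
// {"status":{"conditions":[{"type":"k8s.io/test-condition1", "status":"True"}]}}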
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "pod-readiness-gate"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pod-readiness-gate",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType(readinessGate1)},
{ConditionType: v1.PodConditionType(readinessGate2)},
},
},
}
validatePodReadiness := func(expectReady bool) {
Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
})).NotTo(HaveOccurred())
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false initially.")
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
Expect(err).NotTo(HaveOccurred())
// Sleep for 10 seconds.
time.Sleep(maxReadyStatusUpdateTolerance)
Expect(podClient.PodIsReady(podName)).To(BeFalse(), "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
Expect(err).NotTo(HaveOccurred())
validatePodReadiness(true)
By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
Expect(err).NotTo(HaveOccurred())
validatePodReadiness(false)
})
})
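The readiness-gate test above relies on the rule that a Pod with readinessGates only reports Ready once every listed condition type is True in its status (in addition to the containers themselves being ready). A minimal sketch of that evaluation, not the kubelet's actual function:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// readinessGatesSatisfied reports whether every condition named in the pod's
// readinessGates is present in pod.Status.Conditions with status True.
func readinessGatesSatisfied(pod *corev1.Pod) bool {
	for _, gate := range pod.Spec.ReadinessGates {
		satisfied := false
		for _, cond := range pod.Status.Conditions {
			if cond.Type == gate.ConditionType && cond.Status == corev1.ConditionTrue {
				satisfied = true
				break
			}
		}
		if !satisfied {
			return false
		}
	}
	return true
}

func main() {
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			ReadinessGates: []corev1.PodReadinessGate{{ConditionType: "k8s.io/test-condition1"}},
		},
		Status: corev1.PodStatus{
			Conditions: []corev1.PodCondition{{Type: "k8s.io/test-condition1", Status: corev1.ConditionTrue}},
		},
	}
	fmt.Println(readinessGatesSatisfied(pod)) // true
}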

View File

@ -24,6 +24,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
)
type PrivilegedPodTestConfig struct {
@ -90,14 +91,14 @@ func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
Containers: []v1.Container{
{
Name: c.privilegedContainer,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
{
Name: c.notPrivilegedContainer,
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{"/bin/sleep", "10000"},

File diff suppressed because it is too large

View File

@ -0,0 +1,147 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
var _ = Describe("[sig-storage] Projected combined", func() {
f := framework.NewDefaultFramework("projected")
// Test multiple projections
/*
Release : v1.9
Testname: Projected Volume, multiple projections
Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
var err error
podName := "projected-volume-" + string(uuid.NewUUID())
secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID())
configMapName := "configmap-projected-all-test-volume-" + string(uuid.NewUUID())
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: configMapName,
},
Data: map[string]string{
"configmap-data": "configmap-value-1",
},
}
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: secretName,
},
Data: map[string][]byte{
"secret-data": []byte("secret-value-1"),
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "projected-all-volume-test",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "cat /all/podname && cat /all/secret-data && cat /all/configmap-data"},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/all",
ReadOnly: false,
},
},
},
}
f.TestContainerOutput("Check all projections for projected volume plugin", pod, 0, []string{
fmt.Sprintf("%s", podName),
"secret-value-1",
"configmap-value-1",
})
})
})
func projectedAllVolumeBasePod(podName string, secretName string, configMapName string, labels, annotations map[string]string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: labels,
Annotations: annotations,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "podinfo",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
DownwardAPI: &v1.DownwardAPIProjection{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "podname",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
},
},
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: secretName,
},
},
},
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
},
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}

View File

@ -0,0 +1,683 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Projected configMap", func() {
f := framework.NewDefaultFramework("projected")
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, volume mode default
Description: A Pod is created with projected volume source ConfigMap to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, volume mode 0400
Description: A Pod is created with projected volume source ConfigMap to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
doProjectedConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
})
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeFeature:FSGroup]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, non-root user
Description: A Pod is created with projected volume source ConfigMap to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume as non-root with FSGroup [NodeFeature:FSGroup]", func() {
doProjectedConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped
Description: A Pod is created with projected volume source ConfigMap to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doProjectedConfigMapE2EWithMappings(f, 0, 0, nil)
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped, volume mode 0400
Description: A Pod is created with projected volume source ConfigMap to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [NodeConformance]", func() {
mode := int32(0400)
doProjectedConfigMapE2EWithMappings(f, 0, 0, &mode)
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, mapped, non-root user
Description: A Pod is created with projected volume source ConfigMap to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
doProjectedConfigMapE2EWithMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume with mappings as non-root with FSGroup [NodeFeature:FSGroup]", func() {
doProjectedConfigMapE2EWithMappings(f, 1000, 1001, nil)
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, update
Description: A Pod is created with projected volume source ConfigMap to store a configMap, which is then updated to a new value. The Pod MUST be able to read the initial configMap value value-1. After the configMap is updated, the Pod MUST be able to read the updated value value-2.
*/
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
name := "projected-configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "projected-configmap-volume"
volumeMountPath := "/etc/projected-configmap-volume"
containerName := "projected-configmap-volume-test"
configMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, create, update and delete
Description: Create a Pod with three containers referencing ConfigMaps, namely a create, an update and a delete container. The create container, when started, MUST NOT see its configMap; the update and delete containers MUST be created with a ConfigMap value of value-1. Create the configMap referenced by the create container; the Pod MUST then be able to read the configMap from the create container. Update the configMap referenced by the update container; the Pod MUST be able to read the updated configMap value. Delete the configMap referenced by the delete container; the Pod MUST fail to read the configMap from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/projected-configmap-volumes"
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
deleteContainerName := "delcm-volume-test"
deleteVolumeName := "deletecm-volume"
deleteConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: deleteName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
updateContainerName := "updcm-volume-test"
updateVolumeName := "updatecm-volume"
updateConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: updateName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "createcm-volume-test"
createVolumeName := "createcm-volume"
createConfigMap := &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: createName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: deleteVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: deleteName,
},
Optional: &trueVal,
},
},
},
},
},
},
{
Name: updateVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: updateName,
},
Optional: &trueVal,
},
},
},
},
},
},
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Optional: &trueVal,
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
MountPath: path.Join(volumeMountPath, "delete"),
ReadOnly: true,
},
},
},
{
Name: updateContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
MountPath: path.Join(volumeMountPath, "update"),
ReadOnly: true,
},
},
},
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
})
/*
Release : v1.9
Testname: Projected Volume, ConfigMap, multiple volume paths
Description: A Pod is created with a projected volume source ConfigMap to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts.
*/
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
var (
name = "projected-configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "projected-configmap-volume"
volumeMountPath = "/etc/projected-configmap-volume"
volumeName2 = "projected-configmap-volume-2"
volumeMountPath2 = "/etc/projected-configmap-volume-2"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
})
})
//The pod stays pending during volume creation until the configMap objects are available
//or until mounting the configMap volume times out. There is no configMap object defined for the pod, so it should return a timeout error unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/projected-configmap-volumes"
podName := "pod-projected-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
//A ConfigMap object is defined for the pod. If a key is specified which is not present in the ConfigMap,
// the volume setup will error during pod creation unless it is marked optional.
//Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
volumeMountPath := "/etc/configmap-volumes"
podName := "pod-configmaps-" + string(uuid.NewUUID())
err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional configMap in namespace %q", podName, f.Namespace.Name)
})
})
func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
userID := int64(uid)
groupID := int64(fsGroup)
var (
name = "projected-configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "projected-configmap-volume"
volumeMountPath = "/etc/projected-configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-configmap-volume/data-1",
"--file_mode=/etc/projected-configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if userID != 0 {
pod.Spec.SecurityContext.RunAsUser = &userID
}
if groupID != 0 {
pod.Spec.SecurityContext.FSGroup = &groupID
}
if defaultMode != nil {
//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.DefaultMode = defaultMode
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
output := []string{
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
"mode of file \"/etc/projected-configmap-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
userID := int64(uid)
groupID := int64(fsGroup)
var (
name = "projected-configmap-test-volume-map-" + string(uuid.NewUUID())
volumeName = "projected-configmap-volume"
volumeMountPath = "/etc/projected-configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Items: []v1.KeyToPath{
{
Key: "data-2",
Path: "path/to/data-2",
},
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "projected-configmap-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{"--file_content=/etc/projected-configmap-volume/path/to/data-2",
"--file_mode=/etc/projected-configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if userID != 0 {
pod.Spec.SecurityContext.RunAsUser = &userID
}
if groupID != 0 {
pod.Spec.SecurityContext.FSGroup = &groupID
}
if itemMode != nil {
//pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = itemMode
} else {
mode := int32(0644)
itemMode = &mode
}
// Just check file mode if fsGroup is not set. If fsGroup is set, the
// final mode is adjusted and we are not testing that case.
output := []string{
"content of file \"/etc/projected-configmap-volume/path/to/data-2\": value-2",
}
if fsGroup == 0 {
modeString := fmt.Sprintf("%v", os.FileMode(*itemMode))
output = append(output, "mode of file \"/etc/projected-configmap-volume/path/to/data-2\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
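
The expected mode strings above (for example -rw-r--r-- for the 0644 default and -r-------- for 0400) are produced by formatting the numeric mode through Go's os.FileMode, exactly as the modeString computation in the helpers does; a two-line check:

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.FileMode's String method renders permission bits in ls -l style.
	fmt.Println(os.FileMode(0644)) // -rw-r--r--
	fmt.Println(os.FileMode(0400)) // -r--------
}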

View File

@ -0,0 +1,398 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Projected downwardAPI", func() {
f := framework.NewDefaultFramework("projected")
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, pod name
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, volume mode 0400
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode MUST be -r--------.
*/
framework.ConformanceIt("should set DefaultMode on files [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, volume mode 0400
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The file mode for the DownwardAPIVolumeFile item is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode MUST be -r--------.
*/
framework.ConformanceIt("should set mode on item file [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--------",
})
})
It("should provide podname as non-root with fsgroup [NodeFeature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should provide podname as non-root with fsgroup and defaultMode [NodeFeature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podinfo/podname\": -r--r-----",
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, update labels
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels.
*/
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
podName := "labelsupdate" + string(uuid.NewUUID())
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, update annotation
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations.
*/
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory limits
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory request
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
/*
Release : v1.9
Testname: Projected Volume, DownwardAPI, memory limit, node allocatable
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles.
*/
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})
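// projectedDownwardAPIVolumePodForModeTest returns a pod whose single container prints the file mode of the
// given path; the optional itemMode and defaultMode are applied to the projected downwardAPI volume.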
func projectedDownwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := projectedDownwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc/podinfo",
},
},
},
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items[0].Mode = itemMode
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
}
return pod
}
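// projectedDownwardAPIVolumePodForUpdateTest returns a pod whose container tails the given downwardAPI file
// in a loop, so tests can update labels or annotations and assert that the projected volume content is refreshed.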
func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
pod := projectedDownwardAPIVolumeBasePod(name, labels, annotations)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", "--retry_time=1200", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc/podinfo",
ReadOnly: false,
},
},
},
}
applyLabelsAndAnnotationsToProjectedDownwardAPIPod(labels, annotations, pod)
return pod
}
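// projectedDownwardAPIVolumeBasePod returns a pod with a single projected downwardAPI volume named "podinfo"
// that exposes the pod name plus the container's cpu/memory requests and limits as files.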
func projectedDownwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "podinfo",
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
DownwardAPI: &v1.DownwardAPIProjection{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "podname",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
{
Path: "cpu_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.cpu",
},
},
{
Path: "cpu_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.cpu",
},
},
{
Path: "memory_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.memory",
},
},
{
Path: "memory_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.memory",
},
},
},
},
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
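// applyLabelsAndAnnotationsToProjectedDownwardAPIPod adds "labels" and "annotations" items to the projected
// downwardAPI volume when the corresponding maps are non-empty.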
func applyLabelsAndAnnotationsToProjectedDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
if len(labels) > 0 {
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "labels",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
})
}
if len(annotations) > 0 {
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "annotations",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations",
},
})
}
}

View File

@ -0,0 +1,582 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("[sig-storage] Projected secret", func() {
f := framework.NewDefaultFramework("projected")
/*
Release : v1.9
Testname: Projected Volume, Secrets, volume mode default
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Release : v1.9
Testname: Projected Volume, Secrets, volume mode 0400
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with permission mode set to 0400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Release : v1.9
Testname: Projected Volume, Secrets, non-root, custom fsGroup
Description: A Pod is created with a projected volume source secret to store a secret with a specified key. The volume has permission mode set to 0440, fsGroup set to 1001 and the user set to a non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
uid := int64(1000)
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &uid)
})
/*
Release : v1.9
Testname: Projected Volume, Secrets, mapped
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doProjectedSecretE2EWithMapping(f, nil)
})
/*
Release : v1.9
Testname: Projected Volume, Secrets, mapped, volume mode 0400
Description: A Pod is created with a projected volume source secret to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
mode := int32(0400)
doProjectedSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
var (
namespace2 *v1.Namespace
err error
secret2Name = "projected-secret-test-" + string(uuid.NewUUID())
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
/*
Release : v1.9
Testname: Projected Volume, Secrets, mapped, multiple paths
Description: A Pod is created with a projected volume source secret to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -rw-r--r-- on the mapped volumes.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
var (
name = "projected-secret-test-" + string(uuid.NewUUID())
volumeName = "projected-secret-volume"
volumeMountPath = "/etc/projected-secret-volume"
volumeName2 = "projected-secret-volume-2"
volumeMountPath2 = "/etc/projected-secret-volume-2"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/data-1",
"--file_mode=/etc/projected-secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
"mode of file \"/etc/projected-secret-volume/data-1\": -rw-r--r--",
})
})
/*
Release : v1.9
Testname: Projected Volume, Secrets, create, update and delete
Description: Create a Pod with three containers with projected secret volume sources, namely a create, an update and a delete container. The create container, when started, MUST not have the secret; the update and delete containers MUST be created with a secret value. Create the secret for the create container: the Pod MUST be able to read the secret from the create container. Update the secret in the update container: the Pod MUST be able to read the updated secret value. Delete the secret for the delete container: the Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
trueVal := true
volumeMountPath := "/etc/projected-secret-volumes"
deleteName := "s-test-opt-del-" + string(uuid.NewUUID())
deleteContainerName := "dels-volume-test"
deleteVolumeName := "deletes-volume"
deleteSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: deleteName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
updateName := "s-test-opt-upd-" + string(uuid.NewUUID())
updateContainerName := "upds-volume-test"
updateVolumeName := "updates-volume"
updateSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: updateName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
createName := "s-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "creates-volume-test"
createVolumeName := "creates-volume"
createSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: createName,
},
Data: map[string][]byte{
"data-1": []byte("value-1"),
},
}
By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: deleteVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: deleteName,
},
Optional: &trueVal,
},
},
},
},
},
},
{
Name: updateVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: updateName,
},
Optional: &trueVal,
},
},
},
},
},
},
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: createName,
},
Optional: &trueVal,
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: deleteVolumeName,
MountPath: path.Join(volumeMountPath, "delete"),
ReadOnly: true,
},
},
},
{
Name: updateContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
Name: updateVolumeName,
MountPath: path.Join(volumeMountPath, "update"),
ReadOnly: true,
},
},
},
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
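// Log-polling helpers for each container; used below to observe when secret content appears, changes,
// or disappears in the projected volumes.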
pollCreateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
}
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
pollUpdateLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
}
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
pollDeleteLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
}
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
Expect(err).NotTo(HaveOccurred(), "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
By("waiting to observe update in volume")
Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
})
// The pod stays Pending during volume creation until the referenced secret object is available,
// or until mounting the secret volume times out. No secret object is defined for this pod, so pod startup
// should time out unless the secret is marked optional.
// Slow (~5 mins)
It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/projected-secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
// A secret object is defined for the pod, but the referenced key is not present in it, so volume setup
// during pod creation will fail unless the key is marked optional.
// Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
})
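// doProjectedSecretE2EWithoutMapping creates a secret, mounts it through a projected volume without key
// mappings, and asserts the file content and mode reported by the mounttest container.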
func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32,
secretName string, fsGroup *int64, uid *int64) {
var (
volumeName = "projected-secret-volume"
volumeMountPath = "/etc/projected-secret-volume"
secret = secretForTest(f.Namespace.Name, secretName)
)
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: secretName,
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "projected-secret-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/data-1",
"--file_mode=/etc/projected-secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if defaultMode != nil {
//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.DefaultMode = defaultMode
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
if fsGroup != nil || uid != nil {
pod.Spec.SecurityContext = &v1.PodSecurityContext{
FSGroup: fsGroup,
RunAsUser: uid,
}
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
expectedOutput := []string{
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
"mode of file \"/etc/projected-secret-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
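// doProjectedSecretE2EWithMapping creates a secret and mounts it through a projected volume that maps the
// key "data-1" to the custom path "new-path-data-1" before asserting content and mode.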
func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
var (
name = "projected-secret-test-map-" + string(uuid.NewUUID())
volumeName = "projected-secret-volume"
volumeMountPath = "/etc/projected-secret-volume"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
Secret: &v1.SecretProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "new-path-data-1",
},
},
},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "projected-secret-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/projected-secret-volume/new-path-data-1",
"--file_mode=/etc/projected-secret-volume/new-path-data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if mode != nil {
//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.Items[0].Mode = mode
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = mode
} else {
defaultItemMode := int32(0644)
mode = &defaultItemMode
}
modeString := fmt.Sprintf("%v", os.FileMode(*mode))
expectedOutput := []string{
"content of file \"/etc/projected-secret-volume/new-path-data-1\": value-1",
"mode of file \"/etc/projected-secret-volume/new-path-data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}

vendor/k8s.io/kubernetes/test/e2e/common/runtime.go generated vendored Normal file
View File

@ -0,0 +1,391 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"path"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
var _ = framework.KubeDescribe("Container Runtime", func() {
f := framework.NewDefaultFramework("container-runtime")
Describe("blackbox test", func() {
Context("when starting a container that exits", func() {
/*
Release : v1.13
Testname: Container Runtime, Restart Policy, Pod Phases
Description: If the restart policy is set to Always, Pod MUST be restarted when terminated, If restart policy is OnFailure, Pod MUST be started only if it is terminated with non-zero exit code. If the restart policy is Never, Pod MUST never be restarted. All these three test cases MUST verify the restart counts accordingly.
*/
framework.ConformanceIt("should run with the expected status [NodeConformance]", func() {
restartCountVolumeName := "restart-count"
restartCountVolumePath := "/restart-count"
testContainer := v1.Container{
Image: framework.BusyBoxImage,
VolumeMounts: []v1.VolumeMount{
{
MountPath: restartCountVolumePath,
Name: restartCountVolumeName,
},
},
}
testVolumes := []v1.Volume{
{
Name: restartCountVolumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
},
}
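// Each case runs the same counting script under a different restart policy and asserts the expected pod
// phase, container state, restart count, and readiness.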
testCases := []struct {
Name string
RestartPolicy v1.RestartPolicy
Phase v1.PodPhase
State ContainerState
RestartCount int32
Ready bool
}{
{"terminate-cmd-rpa", v1.RestartPolicyAlways, v1.PodRunning, ContainerStateRunning, 2, true},
{"terminate-cmd-rpof", v1.RestartPolicyOnFailure, v1.PodSucceeded, ContainerStateTerminated, 1, false},
{"terminate-cmd-rpn", v1.RestartPolicyNever, v1.PodFailed, ContainerStateTerminated, 0, false},
}
for _, testCase := range testCases {
// The container fails on the 1st run, succeeds on the 2nd run, then runs forever.
cmdScripts := `
f=%s
count=$(echo 'hello' >> $f ; wc -l $f | awk {'print $1'})
if [ $count -eq 1 ]; then
exit 1
fi
if [ $count -eq 2 ]; then
exit 0
fi
while true; do sleep 1; done
`
tmpCmd := fmt.Sprintf(cmdScripts, path.Join(restartCountVolumePath, "restartCount"))
testContainer.Name = testCase.Name
testContainer.Command = []string{"sh", "-c", tmpCmd}
terminateContainer := ConformanceContainer{
PodClient: f.PodClient(),
Container: testContainer,
RestartPolicy: testCase.RestartPolicy,
Volumes: testVolumes,
PodSecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
}
terminateContainer.Create()
defer terminateContainer.Delete()
By(fmt.Sprintf("Container '%s': should get the expected 'RestartCount'", testContainer.Name))
Eventually(func() (int32, error) {
status, err := terminateContainer.GetStatus()
return status.RestartCount, err
}, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.RestartCount))
By(fmt.Sprintf("Container '%s': should get the expected 'Phase'", testContainer.Name))
Eventually(terminateContainer.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.Phase))
By(fmt.Sprintf("Container '%s': should get the expected 'Ready' condition", testContainer.Name))
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
status, err := terminateContainer.GetStatus()
Expect(err).ShouldNot(HaveOccurred())
By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name))
Expect(terminateContainer.Delete()).To(Succeed())
Eventually(terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(BeFalse())
}
})
rootUser := int64(0)
nonRootUser := int64(10000)
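// Table-driven cases covering how the container termination message is reported: read from
// TerminationMessagePath as root and as a non-root user, and taken from log output (or left empty)
// when TerminationMessagePolicy is FallbackToLogsOnError.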
for _, testCase := range []struct {
name string
container v1.Container
phase v1.PodPhase
message gomegatypes.GomegaMatcher
}{
{
name: "if TerminationMessagePath is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-log"},
TerminationMessagePath: "/dev/termination-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &rootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
{
name: "if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE > /dev/termination-custom-log"},
TerminationMessagePath: "/dev/termination-custom-log",
SecurityContext: &v1.SecurityContext{
RunAsUser: &nonRootUser,
},
},
phase: v1.PodSucceeded,
message: Equal("DONE"),
},
{
name: "from log output if TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n DONE; /bin/false"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodFailed,
message: Equal("DONE\n"),
},
{
name: "as empty when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal(""),
},
{
name: "from file when pod succeeds and TerminationMessagePolicy FallbackToLogOnError is set [NodeConformance]",
container: v1.Container{
Image: framework.BusyBoxImage,
Command: []string{"/bin/sh", "-c"},
Args: []string{"/bin/echo -n OK > /dev/termination-log; /bin/echo DONE; /bin/true"},
TerminationMessagePath: "/dev/termination-log",
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
phase: v1.PodSucceeded,
message: Equal("OK"),
},
} {
It(fmt.Sprintf("should report termination message %s", testCase.name), func() {
testCase.container.Name = "termination-message-container"
c := ConformanceContainer{
PodClient: f.PodClient(),
Container: testCase.container,
RestartPolicy: v1.RestartPolicyNever,
}
By("create the container")
c.Create()
defer c.Delete()
By(fmt.Sprintf("wait for the container to reach %s", testCase.phase))
Eventually(c.GetPhase, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(Equal(testCase.phase))
By("get the container status")
status, err := c.GetStatus()
Expect(err).NotTo(HaveOccurred())
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
By("the termination message should be set")
Expect(status.State.Terminated.Message).Should(testCase.message)
By("delete the container")
Expect(c.Delete()).To(Succeed())
})
}
})
Context("when running a container with a new image", func() {
// The service account only has pull permission
auth := `
{
"auths": {
"https://gcr.io": {
"auth": "X2pzb25fa2V5OnsKICAidHlwZSI6ICJzZXJ2aWNlX2FjY291bnQiLAogICJwcm9qZWN0X2lkIjogImF1dGhlbnRpY2F0ZWQtaW1hZ2UtcHVsbGluZyIsCiAgInByaXZhdGVfa2V5X2lkIjogImI5ZjJhNjY0YWE5YjIwNDg0Y2MxNTg2MDYzZmVmZGExOTIyNGFjM2IiLAogICJwcml2YXRlX2tleSI6ICItLS0tLUJFR0lOIFBSSVZBVEUgS0VZLS0tLS1cbk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzdTSG5LVEVFaVlMamZcbkpmQVBHbUozd3JCY2VJNTBKS0xxS21GWE5RL3REWGJRK2g5YVl4aldJTDhEeDBKZTc0bVovS01uV2dYRjVLWlNcbm9BNktuSU85Yi9SY1NlV2VpSXRSekkzL1lYVitPNkNjcmpKSXl4anFWam5mVzJpM3NhMzd0OUE5VEZkbGZycm5cbjR6UkpiOWl4eU1YNGJMdHFGR3ZCMDNOSWl0QTNzVlo1ODhrb1FBZmgzSmhhQmVnTWorWjRSYko0aGVpQlFUMDNcbnZVbzViRWFQZVQ5RE16bHdzZWFQV2dydDZOME9VRGNBRTl4bGNJek11MjUzUG4vSzgySFpydEx4akd2UkhNVXhcbng0ZjhwSnhmQ3h4QlN3Z1NORit3OWpkbXR2b0wwRmE3ZGducFJlODZWRDY2ejNZenJqNHlLRXRqc2hLZHl5VWRcbkl5cVhoN1JSQWdNQkFBRUNnZ0VBT3pzZHdaeENVVlFUeEFka2wvSTVTRFVidi9NazRwaWZxYjJEa2FnbmhFcG9cbjFJajJsNGlWMTByOS9uenJnY2p5VlBBd3pZWk1JeDFBZVF0RDdoUzRHWmFweXZKWUc3NkZpWFpQUm9DVlB6b3VcbmZyOGRDaWFwbDV0enJDOWx2QXNHd29DTTdJWVRjZmNWdDdjRTEyRDNRS3NGNlo3QjJ6ZmdLS251WVBmK0NFNlRcbmNNMHkwaCtYRS9kMERvSERoVy96YU1yWEhqOFRvd2V1eXRrYmJzNGYvOUZqOVBuU2dET1lQd2xhbFZUcitGUWFcbkpSd1ZqVmxYcEZBUW14M0Jyd25rWnQzQ2lXV2lGM2QrSGk5RXRVYnRWclcxYjZnK1JRT0licWFtcis4YlJuZFhcbjZWZ3FCQWtKWjhSVnlkeFVQMGQxMUdqdU9QRHhCbkhCbmM0UW9rSXJFUUtCZ1FEMUNlaWN1ZGhXdGc0K2dTeGJcbnplanh0VjFONDFtZHVjQnpvMmp5b1dHbzNQVDh3ckJPL3lRRTM0cU9WSi9pZCs4SThoWjRvSWh1K0pBMDBzNmdcblRuSXErdi9kL1RFalk4MW5rWmlDa21SUFdiWHhhWXR4UjIxS1BYckxOTlFKS2ttOHRkeVh5UHFsOE1veUdmQ1dcbjJ2aVBKS05iNkhabnY5Q3lqZEo5ZzJMRG5RS0JnUUREcVN2eURtaGViOTIzSW96NGxlZ01SK205Z2xYVWdTS2dcbkVzZlllbVJmbU5XQitDN3ZhSXlVUm1ZNU55TXhmQlZXc3dXRldLYXhjK0krYnFzZmx6elZZdFpwMThNR2pzTURcbmZlZWZBWDZCWk1zVXQ3Qmw3WjlWSjg1bnRFZHFBQ0xwWitaLzN0SVJWdWdDV1pRMWhrbmxHa0dUMDI0SkVFKytcbk55SDFnM2QzUlFLQmdRQ1J2MXdKWkkwbVBsRklva0tGTkh1YTBUcDNLb1JTU1hzTURTVk9NK2xIckcxWHJtRjZcbkMwNGNTKzQ0N0dMUkxHOFVUaEpKbTRxckh0Ti9aK2dZOTYvMm1xYjRIakpORDM3TVhKQnZFYTN5ZUxTOHEvK1JcbjJGOU1LamRRaU5LWnhQcG84VzhOSlREWTVOa1BaZGh4a2pzSHdVNGRTNjZwMVRESUU0MGd0TFpaRFFLQmdGaldcbktyblFpTnEzOS9iNm5QOFJNVGJDUUFKbmR3anhTUU5kQTVmcW1rQTlhRk9HbCtqamsxQ1BWa0tNSWxLSmdEYkpcbk9heDl2OUc2Ui9NSTFIR1hmV3QxWU56VnRocjRIdHNyQTB0U3BsbWhwZ05XRTZWejZuQURqdGZQSnMyZUdqdlhcbmpQUnArdjhjY21MK3dTZzhQTGprM3ZsN2VlNXJsWWxNQndNdUdjUHhBb0dBZWRueGJXMVJMbVZubEFpSEx1L0xcbmxtZkF3RFdtRWlJMFVnK1BMbm9Pdk81dFE1ZDRXMS94RU44bFA0cWtzcGtmZk1Rbk5oNFNZR0VlQlQzMlpxQ1RcbkpSZ2YwWGpveXZ2dXA5eFhqTWtYcnBZL3ljMXpmcVRaQzBNTzkvMVVjMWJSR2RaMmR5M2xSNU5XYXA3T1h5Zk9cblBQcE5Gb1BUWGd2M3FDcW5sTEhyR3pNPVxuLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLVxuIiwKICAiY2xpZW50X2VtYWlsIjogImltYWdlLXB1bGxpbmdAYXV0aGVudGljYXRlZC1pbWFnZS1wdWxsaW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjExMzc5NzkxNDUzMDA3MzI3ODcxMiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L2ltYWdlLXB1bGxpbmclNDBhdXRoZW50aWNhdGVkLWltYWdlLXB1bGxpbmcuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iCn0=",
"email": "image-pulling@authenticated-image-pulling.iam.gserviceaccount.com"
}
}
}`
secret := &v1.Secret{
Data: map[string][]byte{v1.DockerConfigJsonKey: []byte(auth)},
Type: v1.SecretTypeDockerConfigJson,
}
// The following images are not added to NodeImageWhiteList because this test exercises image pulling,
// so these images don't need to be prepulled. The ImagePullPolicy is v1.PullAlways, so the pull won't be
// blocked by the framework's image whitelist check.
for _, testCase := range []struct {
description string
image string
secret bool
phase v1.PodPhase
waiting bool
}{
{
description: "should not be able to pull image from invalid registry",
image: "invalid.com/invalid/alpine:3.1",
phase: v1.PodPending,
waiting: true,
},
{
description: "should not be able to pull non-existing image from gcr.io",
image: "k8s.gcr.io/invalid-image:invalid-tag",
phase: v1.PodPending,
waiting: true,
},
{
description: "should be able to pull image from gcr.io",
image: "gcr.io/google-containers/debian-base:0.4.0",
phase: v1.PodRunning,
waiting: false,
},
{
description: "should be able to pull image from docker hub",
image: "alpine:3.7",
phase: v1.PodRunning,
waiting: false,
},
{
description: "should not be able to pull from private registry without secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.7",
phase: v1.PodPending,
waiting: true,
},
{
description: "should be able to pull from private registry with secret",
image: "gcr.io/authenticated-image-pulling/alpine:3.7",
secret: true,
phase: v1.PodRunning,
waiting: false,
},
} {
testCase := testCase
It(testCase.description+" [NodeConformance]", func() {
name := "image-pull-test"
command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"}
container := ConformanceContainer{
PodClient: f.PodClient(),
Container: v1.Container{
Name: name,
Image: testCase.image,
Command: command,
// PullAlways makes sure that the image will always be pulled even if it is present before the test.
ImagePullPolicy: v1.PullAlways,
},
RestartPolicy: v1.RestartPolicyNever,
}
if testCase.secret {
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
Expect(err).NotTo(HaveOccurred())
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}
// checkContainerStatus checks whether the container status matches expectation.
checkContainerStatus := func() error {
status, err := container.GetStatus()
if err != nil {
return fmt.Errorf("failed to get container status: %v", err)
}
// Check the container state first. The default pod status is Pending; if we checked the pod phase first
// and the expected phase is Pending, the container status might not even have shown up yet.
// Check container state
if !testCase.waiting {
if status.State.Running == nil {
return fmt.Errorf("expected container state: Running, got: %q",
GetContainerState(status.State))
}
}
if testCase.waiting {
if status.State.Waiting == nil {
return fmt.Errorf("expected container state: Waiting, got: %q",
GetContainerState(status.State))
}
reason := status.State.Waiting.Reason
if reason != images.ErrImagePull.Error() &&
reason != images.ErrImagePullBackOff.Error() {
return fmt.Errorf("unexpected waiting reason: %q", reason)
}
}
// Check pod phase
phase, err := container.GetPhase()
if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err)
}
if phase != testCase.phase {
return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase)
}
return nil
}
// The image registry is not always stable, which sometimes causes the test to fail. Retry a few times
// to make the test less flaky.
const flakeRetry = 3
for i := 1; i <= flakeRetry; i++ {
var err error
By("create the container")
container.Create()
By("check the container status")
for start := time.Now(); time.Since(start) < ContainerStatusRetryTimeout; time.Sleep(ContainerStatusPollInterval) {
if err = checkContainerStatus(); err == nil {
break
}
}
By("delete the container")
container.Delete()
if err == nil {
break
}
if i < flakeRetry {
framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else {
framework.Failf("All %d attempts failed: %v", flakeRetry, err)
}
}
})
}
})
})
})

View File

@ -23,17 +23,19 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("[sig-api-machinery] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-env-vars
Description: Ensure that secret can be consumed via environment
variables.
Release : v1.9
Testname: Secrets, pod environment field
Description: Create a secret. Create a Pod with a Container that declares an environment variable referencing the secret to extract a key's value. The Pod MUST have an environment variable that contains the proper value for that secret key.
*/
framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
@ -53,7 +55,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
Containers: []v1.Container{
{
Name: "secret-env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
@ -80,9 +82,9 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
})
/*
Testname: secret-configmaps-source
Description: Ensure that secret can be consumed via source of a set
of ConfigMaps.
Release : v1.9
Testname: Secrets, pod environment from source
Description: Create a secret. Create a Pod with a Container that declares environment variables using EnvFrom referencing the secret to extract a key's value. The Pod MUST have an environment variable that contains the proper value for that secret key.
*/
framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
@ -101,7 +103,7 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
Containers: []v1.Container{
{
Name: "env-test",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
@ -123,6 +125,11 @@ var _ = Describe("[sig-api-machinery] Secrets", func() {
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})
It("should fail to create secret in volume due to empty secret key", func() {
secret, err := createEmptyKeySecretForTest(f)
Expect(err).To(HaveOccurred(), "created secret %q with empty key in namespace %q", secret.Name, f.Namespace.Name)
})
})
func newEnvFromSecret(namespace, name string) *v1.Secret {
@ -138,3 +145,18 @@ func newEnvFromSecret(namespace, name string) *v1.Secret {
},
}
}
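// createEmptyKeySecretForTest builds a secret whose only data key is the empty string and submits it;
// the caller asserts that creation fails.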
func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) {
secretName := "secret-emptyKey-test-" + string(uuid.NewUUID())
secret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: secretName,
},
Data: map[string][]byte{
"": []byte("value-1\n"),
},
}
By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
}

View File

@ -25,6 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -34,18 +35,18 @@ var _ = Describe("[sig-storage] Secrets", func() {
f := framework.NewDefaultFramework("secrets")
/*
Testname: secret-volume-mount-without-mapping
Description: Ensure that secret can be mounted without mapping to a
pod volume.
Release : v1.9
Testname: Secrets Volume, default
Description: Create a secret. Create a Pod with a secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume and the file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
/*
Testname: secret-volume-mount-without-mapping-default-mode
Description: Ensure that secret can be mounted without mapping to a
pod volume in default mode.
Release : v1.9
Testname: Secrets Volume, volume mode 0400
Description: Create a secret. Create a Pod with a secret volume source configured into the container with file mode set to 0400. Pod MUST be able to read the secret from the mounted volume and the file mode of the secret MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [NodeConformance]", func() {
defaultMode := int32(0400)
@ -53,9 +54,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-volume-mount-without-mapping-non-root-default-mode-fsgroup
Description: Ensure that secret can be mounted without mapping to a pod
volume as non-root in default mode with fsGroup set.
Release : v1.9
Testname: Secrets Volume, volume mode 0440, fsGroup 1001 and uid 1000
Description: Create a secret. Create a Pod with a secret volume source configured into the container with file mode set to 0440, running as a non-root user with uid 1000 and fsGroup 1001. Pod MUST be able to read the secret from the mounted volume and the file mode of the secret MUST be -r--r-----.
*/
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [NodeConformance]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@ -65,25 +66,30 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-volume-mount-with-mapping
Description: Ensure that secret can be mounted with mapping to a pod
volume.
Release : v1.9
Testname: Secrets Volume, mapping
Description: Create a secret. Create a Pod with a secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume at the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
doSecretE2EWithMapping(f, nil)
})
/*
Testname: secret-volume-mount-with-mapping-item-mode
Description: Ensure that secret can be mounted with mapping to a pod
volume in item mode.
Release : v1.9
Testname: Secrets Volume, mapping, volume mode 0400
Description: Create a secret. Create a Pod with a secret volume source configured into the container with a custom path and file mode set to 0400. Pod MUST be able to read the secret from the mounted volume at the specified custom path. The file mode of the secret MUST be -r--------.
*/
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [NodeConformance]", func() {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
/*
Release : v1.12
Testname: Secrets Volume, volume mode default, secret with same name in different namespace
Description: Create a secret with the same name in two namespaces. Create a Pod with a secret volume source configured into the container. Pod MUST be able to read only the secret that is associated with the namespace in which the Pod was created. The file mode of the secret MUST be -rw-r--r-- by default.
*/
framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
var (
namespace2 *v1.Namespace
err error
@ -105,8 +111,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-multiple-volume-mounts
Description: Ensure that secret can be mounted to multiple pod volumes.
Release : v1.9
Testname: Secrets Volume, mapping multiple volume paths
Description: Create a secret. Create a Pod with two secret volume sources configured into the container at two different custom paths. Pod MUST be able to read the secret from both mounted volumes at the two specified custom paths.
*/
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
// This test ensures that the same secret can be mounted in multiple
@ -153,7 +160,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
@ -182,9 +189,9 @@ var _ = Describe("[sig-storage] Secrets", func() {
})
/*
Testname: secret-mounted-volume-optional-update-change
Description: Ensure that optional update change to secret can be
reflected on a mounted volume.
Release : v1.9
Testname: Secrets Volume, create, update and delete
Description: Create a Pod with three containers with secret volume sources, namely a create, an update and a delete container. The create container, when started, MUST not have the secret; the update and delete containers MUST be created with a secret value. Create the secret for the create container: the Pod MUST be able to read the secret from the create container. Update the secret in the update container: the Pod MUST be able to read the updated secret value. Delete the secret for the delete container: the Pod MUST fail to read the secret from the delete container.
*/
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
@ -279,7 +286,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
Containers: []v1.Container{
{
Name: deleteContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -291,7 +298,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
{
Name: updateContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
VolumeMounts: []v1.VolumeMount{
{
@ -303,7 +310,7 @@ var _ = Describe("[sig-storage] Secrets", func() {
},
{
Name: createContainerName,
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
@ -357,6 +364,26 @@ var _ = Describe("[sig-storage] Secrets", func() {
Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-3"))
Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
})
// The pod stays Pending during volume creation until the referenced secret object is available,
// or until mounting the secret volume times out. No secret object is defined for this pod, so pod startup
// should time out unless the secret is marked optional.
// Slow (~5 mins)
It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
// A secret object is defined for the pod, but the referenced key is not present in it, so volume setup
// during pod creation will fail unless the key is marked optional.
// Slow (~5 mins)
It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
volumeMountPath := "/etc/secret-volumes"
podName := "pod-secrets-" + string(uuid.NewUUID())
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
Expect(err).To(HaveOccurred(), "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
})
})
func secretForTest(namespace, name string) *v1.Secret {
@ -406,7 +433,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
@ -483,7 +510,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: mountImage,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},
@ -514,3 +541,112 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
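// createNonOptionalSecretPod creates a pod that mounts a non-optional secret volume referencing a secret
// that was never created, then waits for the pod to become Running (which is expected to time out).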
func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
createName := "s-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "creates-volume-test"
createVolumeName := "creates-volume"
//creating a pod without secret object created, by mentioning the secret volume source reference name
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: createName,
Optional: &falseValue,
},
},
},
},
Containers: []v1.Container{
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}
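// createNonOptionalSecretPodWithSecret creates the secret, then creates a pod whose non-optional secret
// volume references a key ("data_4") that does not exist in it, and waits for the pod to become Running.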
func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPath, podName string) error {
podLogTimeout := framework.GetPodSecretUpdateTimeout(f.ClientSet)
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
falseValue := false
createName := "s-test-opt-create-" + string(uuid.NewUUID())
createContainerName := "creates-volume-test"
createVolumeName := "creates-volume"
secret := secretForTest(f.Namespace.Name, createName)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
//creating a pod with secret object, with the key which is not present in secret object.
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: createVolumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: createName,
Items: []v1.KeyToPath{
{
Key: "data_4",
Path: "value-4\n",
},
},
Optional: &falseValue,
},
},
},
},
Containers: []v1.Container{
{
Name: createContainerName,
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Command: []string{"/mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: createVolumeName,
MountPath: path.Join(volumeMountPath, "create"),
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
pod = f.PodClient().Create(pod)
return f.WaitForPodRunning(pod.Name)
}

View File

@ -0,0 +1,266 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"strings"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
Context("When creating a container with runAsUser", func() {
makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
RunAsUser: &userid,
},
},
},
},
}
}
createAndWaitUserPod := func(userid int64) {
podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
userid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
/*
Release : v1.12
Testname: Security Context: runAsUser (id:65534)
Description: A container created with the runAsUser option set to id 65534 runs with that given id.
*/
It("should run the container with uid 65534 [NodeConformance]", func() {
createAndWaitUserPod(65534)
})
/*
Release : v1.12
Testname: Security Context: runAsUser (id:0)
Description: A container created with the runAsUser option set to id 0 runs with that given id.
*/
It("should run the container with uid 0 [NodeConformance]", func() {
createAndWaitUserPod(0)
})
})
Context("When creating a pod with readOnlyRootFilesystem", func() {
makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
},
},
},
},
}
}
createAndWaitUserPod := func(readOnlyRootFilesystem bool) string {
podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "touch checkfile"},
readOnlyRootFilesystem,
))
if readOnlyRootFilesystem {
podClient.WaitForFailure(podName, framework.PodStartTimeout)
} else {
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
}
return podName
}
/*
Release : v1.12
Testname: Security Context: readOnlyRootFilesystem=true.
Description: When a container is configured with readOnlyRootFilesystem=true, write operations to the root filesystem are not allowed.
*/
It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [NodeConformance]", func() {
createAndWaitUserPod(true)
})
/*
Release : v1.12
Testname: Security Context: readOnlyRootFilesystem=false.
Description: When a container is configured with readOnlyRootFilesystem=false, write operations to the root filesystem are allowed.
*/
It("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func() {
createAndWaitUserPod(false)
})
})
Context("When creating a pod with privileged", func() {
makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: image,
Name: podName,
Command: command,
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
},
}
}
createAndWaitUserPod := func(privileged bool) string {
podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
podClient.Create(makeUserPod(podName,
framework.BusyBoxImage,
[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
privileged,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podName
}
It("should run the container as unprivileged when false [NodeConformance]", func() {
podName := createAndWaitUserPod(false)
logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
framework.Logf("Got logs for pod %q: %q", podName, logs)
if !strings.Contains(logs, "Operation not permitted") {
framework.Failf("unprivileged container shouldn't be able to create dummy device")
}
})
})
Context("when creating containers with AllowPrivilegeEscalation", func() {
makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
Name: podName,
SecurityContext: &v1.SecurityContext{
AllowPrivilegeEscalation: allowPrivilegeEscalation,
RunAsUser: &uid,
},
},
},
},
}
}
createAndMatchOutput := func(podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
podClient.Create(makeAllowPrivilegeEscalationPod(podName,
allowPrivilegeEscalation,
uid,
))
podClient.WaitForSuccess(podName, framework.PodStartTimeout)
return podClient.MatchContainerOutput(podName, podName, output)
}
/*
Testname: allowPrivilegeEscalation unset and uid != 0.
Description: Leaving allowPrivilegeEscalation unset allows the privilege escalation operation.
A container is configured with allowPrivilegeEscalation unspecified (nil) and a non-zero uid.
When the container runs, it runs with uid=0.
*/
It("should allow privilege escalation when not explicitly set and uid != 0 [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
/*
Testname: allowPrivilegeEscalation=false.
Description: Setting allowPrivilegeEscalation to false does not allow the privilege escalation operation.
A container is configured with allowPrivilegeEscalation=false and a non-zero uid (1000).
When the container runs, it runs with uid=1000.
*/
It("should not allow privilege escalation when false [NodeConformance]", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
/*
Testname: allowPrivilegeEscalation=true.
Description: Setting allowPrivilegeEscalation to true allows the privilege escalation operation.
A container is configured with allowPrivilegeEscalation=true and a non-zero uid (1000).
When the container runs, it runs with uid=0 (making use of the privilege escalation).
*/
It("should allow privilege escalation when true [NodeConformance]", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
})
})

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -42,7 +43,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
Containers: []v1.Container{
{
Name: "test-container",
Image: busyboxImage,
Image: imageutils.GetE2EImage(imageutils.BusyBox),
},
},
RestartPolicy: v1.RestartPolicyNever,

View File

@ -0,0 +1,98 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const dummyFinalizer = "k8s.io/dummy-finalizer"
var _ = framework.KubeDescribe("TTLAfterFinished", func() {
f := framework.NewDefaultFramework("ttlafterfinished")
alphaFeatureStr := "[Feature:TTLAfterFinished]"
It(fmt.Sprintf("Job should be deleted once it finishes after TTL seconds %s", alphaFeatureStr), func() {
testFinishedJob(f)
})
})
func cleanupJob(f *framework.Framework, job *batch.Job) {
ns := f.Namespace.Name
c := f.ClientSet
framework.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
removeFinalizerFunc := func(j *batch.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
_, err := framework.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
Expect(err).NotTo(HaveOccurred())
framework.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
err = framework.WaitForAllJobPodsGone(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
}
func testFinishedJob(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
parallelism := int32(1)
completions := int32(1)
backoffLimit := int32(2)
ttl := int32(10)
job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job.Spec.TTLSecondsAfterFinished = &ttl
job.ObjectMeta.Finalizers = []string{dummyFinalizer}
defer cleanupJob(f, job)
framework.Logf("Create a Job %s/%s with TTL", job.Namespace, job.Name)
job, err := framework.CreateJob(c, ns, job)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Wait for the Job to finish")
err = framework.WaitForJobFinish(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Wait for TTL after finished controller to delete the Job")
err = framework.WaitForJobDeleting(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = framework.GetJob(c, ns, job.Name)
Expect(err).NotTo(HaveOccurred())
finishTime := framework.JobFinishTime(job)
finishTimeUTC := finishTime.UTC()
Expect(finishTime.IsZero()).NotTo(BeTrue())
deleteAtUTC := job.ObjectMeta.DeletionTimestamp.UTC()
Expect(deleteAtUTC).NotTo(BeNil())
expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
Expect(deleteAtUTC.Before(expireAtUTC)).To(BeFalse())
}
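For readers unfamiliar with the field exercised above, here is a minimal, illustrative sketch of a Job that opts into TTL-based cleanup; the name, image, and TTL value are assumptions, and the test itself builds its Job through framework.NewTestJob rather than this way.
// Sketch only: a Job whose object is cleaned up roughly 100s after it
// finishes, via Spec.TTLSecondsAfterFinished. Object metadata (name,
// namespace) is omitted for brevity; values are illustrative.
func ttlJobSketch() *batch.Job {
	ttl := int32(100)
	return &batch.Job{
		Spec: batch.JobSpec{
			TTLSecondsAfterFinished: &ttl,
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{Name: "main", Image: "busybox", Command: []string{"true"}},
					},
				},
			},
		},
	}
}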

View File

@ -40,11 +40,6 @@ const (
NodeE2E Suite = "node e2e"
)
var (
mountImage = imageutils.GetE2EImage(imageutils.Mounttest)
busyboxImage = "busybox"
)
var CurrentSuite Suite
// CommonImageWhiteList is the list of images used in common test. These images should be prepulled
@ -52,20 +47,20 @@ var CurrentSuite Suite
// only used by node e2e test.
// TODO(random-liu): Change the image puller pod to use similar mechanism.
var CommonImageWhiteList = sets.NewString(
"busybox",
imageutils.GetE2EImage(imageutils.BusyBox),
imageutils.GetE2EImage(imageutils.EntrypointTester),
imageutils.GetE2EImage(imageutils.IpcUtils),
imageutils.GetE2EImage(imageutils.Liveness),
imageutils.GetE2EImage(imageutils.Mounttest),
imageutils.GetE2EImage(imageutils.MounttestUser),
imageutils.GetE2EImage(imageutils.Netexec),
imageutils.GetE2EImage(imageutils.NginxSlim),
imageutils.GetE2EImage(imageutils.Nginx),
imageutils.GetE2EImage(imageutils.ServeHostname),
imageutils.GetE2EImage(imageutils.TestWebserver),
imageutils.GetE2EImage(imageutils.Hostexec),
imageutils.GetE2EImage(imageutils.VolumeNFSServer),
imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
imageutils.GetE2EImage(imageutils.E2ENet),
imageutils.GetE2EImage(imageutils.Net),
)
func svcByName(name string, port int) *v1.Service {

View File

@ -62,7 +62,7 @@ var _ = Describe("[sig-storage] GCP Volumes", func() {
var c clientset.Interface
BeforeEach(func() {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom")
namespace = f.Namespace
c = f.ClientSet

View File

@ -24,18 +24,16 @@ import (
"testing"
"time"
"github.com/golang/glog"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/gomega"
"k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/util/logs"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/version"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
@ -46,89 +44,18 @@ import (
// ensure auth plugins are loaded
_ "k8s.io/client-go/plugin/pkg/client/auth"
// ensure that cloud providers are loaded
_ "k8s.io/kubernetes/test/e2e/framework/providers/aws"
_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
)
var (
cloudConfig = &framework.TestContext.CloudConfig
)
// setupProviderConfig validates and sets up cloudConfig based on framework.TestContext.Provider.
func setupProviderConfig() error {
switch framework.TestContext.Provider {
case "":
glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
case "gce", "gke":
framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
zone := framework.TestContext.CloudConfig.Zone
region := framework.TestContext.CloudConfig.Region
var err error
if region == "" {
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
}
managedZones := []string{} // Manage all zones in the region
if !framework.TestContext.CloudConfig.MultiZone {
managedZones = []string{zone}
}
gceAlphaFeatureGate := gcecloud.NewAlphaFeatureGate([]string{
gcecloud.AlphaFeatureNetworkEndpointGroup,
})
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ApiEndpoint: framework.TestContext.CloudConfig.ApiEndpoint,
ProjectID: framework.TestContext.CloudConfig.ProjectID,
Region: region,
Zone: zone,
ManagedZones: managedZones,
NetworkName: "", // TODO: Change this to use framework.TestContext.CloudConfig.Network?
SubnetworkName: "",
NodeTags: nil,
NodeInstancePrefix: "",
TokenSource: nil,
UseMetadataServer: false,
AlphaFeatureGate: gceAlphaFeatureGate})
if err != nil {
return fmt.Errorf("Error building GCE/GKE provider: %v", err)
}
cloudConfig.Provider = gceCloud
// Arbitrarily pick one of the zones we have nodes in
if cloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone {
zones, err := gceCloud.GetAllZonesFromCloudProvider()
if err != nil {
return err
}
cloudConfig.Zone, _ = zones.PopAny()
}
case "aws":
if cloudConfig.Zone == "" {
return fmt.Errorf("gce-zone must be specified for AWS")
}
case "azure":
if cloudConfig.ConfigFile == "" {
return fmt.Errorf("config-file must be specified for Azure")
}
config, err := os.Open(cloudConfig.ConfigFile)
if err != nil {
framework.Logf("Couldn't open cloud provider configuration %s: %#v",
cloudConfig.ConfigFile, err)
}
defer config.Close()
cloudConfig.Provider, err = azure.NewCloud(config)
}
return nil
}
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
@ -140,10 +67,6 @@ func setupProviderConfig() error {
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// Run only on Ginkgo node 1
if err := setupProviderConfig(); err != nil {
framework.Failf("Failed to setup provider config: %v", err)
}
switch framework.TestContext.Provider {
case "gce", "gke":
framework.LogClusterImageSources()
@ -151,7 +74,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
c, err := framework.LoadClientset()
if err != nil {
glog.Fatal("Error loading client: ", err)
klog.Fatal("Error loading client: ", err)
}
// Delete any namespaces except those created by the system. This ensures no
@ -166,7 +89,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
if err != nil {
framework.Failf("Error deleting orphaned namespaces: %v", err)
}
glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
klog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
@ -186,41 +109,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// #41007. To avoid those pods blocking the whole test run (and just
// wasting the whole run), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, framework.ImagePullerLabels); err != nil {
if err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout); err != nil {
// There is no guarantee that the image pulling will succeed in 3 minutes
// and we don't even run the image puller on all platforms (including GKE).
// We wait for it so we get an indication of failures in the logs, and to
// maximize benefit of image pre-pulling.
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", framework.ImagePrePullingTimeout, err)
}
// Dump the output of the nethealth containers only once per run
if framework.TestContext.DumpLogsOnFailure {
logFunc := framework.Logf
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "nethealth.txt")
file, err := os.Create(filePath)
if err != nil {
framework.Logf("Failed to create a file with network health data %v: %v\nPrinting to stdout", filePath, err)
} else {
defer file.Close()
if err = file.Chmod(0644); err != nil {
framework.Logf("Failed to chmod to 644 of %v: %v", filePath, err)
}
logFunc = framework.GetLogToFileFunc(file)
framework.Logf("Dumping network health container logs from all nodes to file %v", filePath)
}
} else {
framework.Logf("Dumping network health container logs from all nodes...")
}
framework.LogContainersInPodsWithLabels(c, metav1.NamespaceSystem, framework.ImagePullerLabels, "nethealth", logFunc)
if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil {
framework.Logf("WARNING: Waiting for all daemonsets to be ready failed: %v", err)
}
// Log the version of the server and this client.
@ -243,20 +140,14 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
}, func(data []byte) {
// Run on all Ginkgo nodes
if cloudConfig.Provider == nil {
if err := setupProviderConfig(); err != nil {
framework.Failf("Failed to setup provider config: %v", err)
}
}
})
// Similar to SynchornizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
// Run on all Ginkgo nodes
framework.Logf("Running AfterSuite actions on all node")
framework.Logf("Running AfterSuite actions on all nodes")
framework.RunCleanupActions()
}, func() {
// Run only Ginkgo on node 1
@ -325,12 +216,12 @@ func RunE2ETests(t *testing.T) {
// TODO: we should probably only be trying to create this directory once
// rather than once-per-Ginkgo-node.
if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
glog.Errorf("Failed creating report directory: %v", err)
klog.Errorf("Failed creating report directory: %v", err)
} else {
r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
}
}
glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
klog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}

View File

@ -17,9 +17,15 @@ limitations under the License.
package e2e
import (
"flag"
"fmt"
"os"
"testing"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
"k8s.io/kubernetes/test/e2e/framework/viperconfig"
"k8s.io/kubernetes/test/e2e/generated"
// test sources
_ "k8s.io/kubernetes/test/e2e/apimachinery"
@ -40,8 +46,31 @@ import (
_ "k8s.io/kubernetes/test/e2e/ui"
)
var viperConfig = flag.String("viper-config", "", "The name of a viper config file (https://github.com/spf13/viper#what-is-viper). All e2e command line parameters can also be configured in such a file. May contain a path and may or may not contain the file suffix. The default is to look for an optional file with `e2e` as base name. If a file is specified explicitly, it must be present.")
func init() {
framework.ViperizeFlags()
// Register framework flags, then handle flags and Viper config.
framework.HandleFlags()
if err := viperconfig.ViperizeFlags(*viperConfig, "e2e"); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
framework.AfterReadingAllFlags(&framework.TestContext)
// TODO: Deprecating repo-root over time... instead just use gobindata_util.go, see #23987.
// Right now it is still needed, for example by
// test/e2e/framework/ingress/ingress_utils.go
// for providing the optional secret.yaml file and by
// test/e2e/framework/util.go for cluster/log-dump.
if framework.TestContext.RepoRoot != "" {
testfiles.AddFileSource(testfiles.RootFileSource{Root: framework.TestContext.RepoRoot})
}
// Enable bindata file lookup as fallback.
testfiles.AddFileSource(testfiles.BindataFileSource{
Asset: generated.Asset,
AssetNames: generated.AssetNames,
})
}
func TestE2E(t *testing.T) {

View File

@ -17,28 +17,19 @@ limitations under the License.
package e2e
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@ -51,19 +42,6 @@ const (
var _ = framework.KubeDescribe("[Feature:Example]", func() {
f := framework.NewDefaultFramework("examples")
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
clusterState := func(selectorKey string, selectorValue string) *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{selectorKey: selectorValue},
ValidPhases: []v1.PodPhase{v1.PodRunning},
})
}
// Customized ForEach wrapper for this test.
forEachPod := func(selectorKey string, selectorValue string, fn func(v1.Pod)) {
clusterState(selectorKey, selectorValue).ForEach(fn)
}
var c clientset.Interface
var ns string
BeforeEach(func() {
@ -81,330 +59,15 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.ExpectNoError(err)
})
framework.KubeDescribe("Redis", func() {
It("should create and stop redis servers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/redis", file)
}
bootstrapYaml := mkpath("redis-master.yaml")
sentinelServiceYaml := mkpath("redis-sentinel-service.yaml")
sentinelControllerYaml := mkpath("redis-sentinel-controller.yaml")
controllerYaml := mkpath("redis-controller.yaml")
bootstrapPodName := "redis-master"
redisRC := "redis"
sentinelRC := "redis-sentinel"
nsFlag := fmt.Sprintf("--namespace=%v", ns)
expectedOnServer := "The server is now ready to accept connections"
expectedOnSentinel := "+monitor master"
By("starting redis bootstrap")
framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag)
err := framework.WaitForPodNameRunningInNamespace(c, bootstrapPodName, ns)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("setting up services and controllers")
framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{sentinelRC: "true"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
label = labels.SelectorFromSet(labels.Set(map[string]string{"name": redisRC}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
By("scaling up the deployment")
framework.RunKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag)
framework.RunKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag)
framework.WaitForRCToStabilize(c, ns, redisRC, framework.PodReadyBeforeTimeout)
framework.WaitForRCToStabilize(c, ns, sentinelRC, framework.PodReadyBeforeTimeout)
By("checking up the services")
checkAllLogs := func() {
selectorKey, selectorValue := "name", redisRC
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
}
})
selectorKey, selectorValue = sentinelRC, "true"
label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
}
})
}
checkAllLogs()
By("turning down bootstrap")
framework.RunKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag)
err = framework.WaitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName)
Expect(err).NotTo(HaveOccurred())
By("waiting for the new master election")
checkAllLogs()
})
})
framework.KubeDescribe("Spark", func() {
It("should start spark master, driver and workers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/spark", file)
}
// TODO: Add Zeppelin and Web UI to this example.
serviceYaml := mkpath("spark-master-service.yaml")
masterYaml := mkpath("spark-master-controller.yaml")
workerControllerYaml := mkpath("spark-worker-controller.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
master := func() {
By("starting master")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
selectorKey, selectorValue := "component", "spark-master"
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Now polling for Master startup...")
// Only one master pod: but it's a natural way to look up pod names.
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
framework.Logf("Now waiting for master to startup in %v", pod.Name)
_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
By("waiting for master endpoint")
err = framework.WaitForEndpoint(c, ns, "spark-master")
Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
if maErr != nil {
framework.Failf("Didn't find target string. error: %v", maErr)
}
})
}
worker := func() {
By("starting workers")
framework.Logf("Now starting Workers")
framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
selectorKey, selectorValue := "component", "spark-worker"
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
// For now, scaling is orthogonal to the core test.
// framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
framework.Logf("Now polling for worker startup...")
forEachPod(selectorKey, selectorValue,
func(pod v1.Pod) {
_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
Expect(slaveErr).NotTo(HaveOccurred())
})
}
// Run the worker verification after we turn up the master.
defer worker()
master()
})
})
framework.KubeDescribe("Cassandra", func() {
It("should create and scale cassandra", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
}
serviceYaml := mkpath("cassandra-service.yaml")
controllerYaml := mkpath("cassandra-controller.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("Starting the cassandra service")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.Logf("wait for service")
err := framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
Expect(err).NotTo(HaveOccurred())
// Create an RC with n nodes in it. Each node will then be verified.
By("Creating a Cassandra RC")
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod("app", "cassandra", func(pod v1.Pod) {
framework.Logf("Verifying pod %v ", pod.Name)
// TODO how do we do this better? Ready Probe?
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if matched != true {
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
}
})
})
})
framework.KubeDescribe("CassandraStatefulSet", func() {
It("should create statefulset", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
}
serviceYaml := mkpath("cassandra-service.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
// have to change dns prefix because of the dynamic namespace
input := generated.ReadOrDie(mkpath("cassandra-statefulset.yaml"))
output := strings.Replace(string(input), "cassandra-0.cassandra.default.svc.cluster.local", "cassandra-0.cassandra."+ns+".svc.cluster.local", -1)
statefulsetYaml := "/tmp/cassandra-statefulset.yaml"
err := ioutil.WriteFile(statefulsetYaml, []byte(output), 0644)
Expect(err).NotTo(HaveOccurred())
By("Starting the cassandra service")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.Logf("wait for service")
err = framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
Expect(err).NotTo(HaveOccurred())
// Create a StatefulSet with n nodes in it. Each node will then be verified.
By("Creating a Cassandra StatefulSet")
framework.RunKubectlOrDie("create", "-f", statefulsetYaml, nsFlag)
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
// TODO - parse this number out of the yaml
numPets := 3
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
func() (bool, error) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: label.String()})
if err != nil {
return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
}
framework.ExpectNoError(err)
if len(podList.Items) < numPets {
framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
return false, nil
}
if len(podList.Items) > numPets {
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
}
for _, p := range podList.Items {
isReady := podutil.IsPodReady(&p)
if p.Status.Phase != v1.PodRunning || !isReady {
framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady)
return false, nil
}
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if matched != true {
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
}
})
// reuse the StatefulSet e2e cleanup helper here, since deleting PVCs by hand is a pain
framework.DeleteAllStatefulSets(c, ns)
})
})
framework.KubeDescribe("Storm", func() {
It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storm", file)
}
zookeeperServiceJson := mkpath("zookeeper-service.json")
zookeeperPodJson := mkpath("zookeeper.json")
nimbusServiceJson := mkpath("storm-nimbus-service.json")
nimbusPodJson := mkpath("storm-nimbus.json")
workerControllerJson := mkpath("storm-worker-controller.json")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
zookeeperPod := "zookeeper"
nimbusPod := "nimbus"
By("starting Zookeeper")
framework.RunKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag)
framework.RunKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag)
err := f.WaitForPodRunningSlow(zookeeperPod)
Expect(err).NotTo(HaveOccurred())
By("checking if zookeeper is up and running")
_, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForEndpoint(c, ns, "zookeeper")
Expect(err).NotTo(HaveOccurred())
By("starting Nimbus")
framework.RunKubectlOrDie("create", "-f", nimbusPodJson, nsFlag)
framework.RunKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag)
err = f.WaitForPodRunningSlow(nimbusPod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForEndpoint(c, ns, "nimbus")
Expect(err).NotTo(HaveOccurred())
By("starting workers")
framework.RunKubectlOrDie("create", "-f", workerControllerJson, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "storm-worker"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod("name", "storm-worker", func(pod v1.Pod) {
//do nothing, just wait for the pod to be running
})
// TODO: Add logging configuration to nimbus & workers images and then
// look for a string instead of sleeping.
time.Sleep(20 * time.Second)
By("checking if there are established connections to Zookeeper")
_, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking if Nimbus responds to requests")
framework.LookForString("No topologies running.", time.Minute, func() string {
return framework.RunKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list")
})
})
})
framework.KubeDescribe("Liveness", func() {
It("liveness pods should be automatically restarted", func() {
mkpath := func(file string) string {
path := filepath.Join("test/fixtures/doc-yaml/user-guide/liveness", file)
framework.ExpectNoError(createFileForGoBinData(path, path))
return path
}
execYaml := mkpath("exec-liveness.yaml")
httpYaml := mkpath("http-liveness.yaml")
test := "test/fixtures/doc-yaml/user-guide/liveness"
execYaml := readFile(test, "exec-liveness.yaml")
httpYaml := readFile(test, "http-liveness.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, execYaml), nsFlag)
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, httpYaml), nsFlag)
framework.RunKubectlOrDieInput(execYaml, "create", "-f", "-", nsFlag)
framework.RunKubectlOrDieInput(httpYaml, "create", "-f", "-", nsFlag)
// Since both containers start rapidly, we can easily run this test in parallel.
var wg sync.WaitGroup
@ -446,20 +109,16 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.KubeDescribe("Secret", func() {
It("should create a pod that reads a secret", func() {
mkpath := func(file string) string {
path := filepath.Join("test/fixtures/doc-yaml/user-guide/secrets", file)
framework.ExpectNoError(createFileForGoBinData(path, path))
return path
}
secretYaml := mkpath("secret.yaml")
podYaml := mkpath("secret-pod.yaml")
test := "test/fixtures/doc-yaml/user-guide/secrets"
secretYaml := readFile(test, "secret.yaml")
podYaml := readFile(test, "secret-pod.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
podName := "secret-test-pod"
By("creating secret and pod")
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, secretYaml), nsFlag)
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, podYaml), nsFlag)
framework.RunKubectlOrDieInput(secretYaml, "create", "-f", "-", nsFlag)
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred())
@ -471,17 +130,13 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.KubeDescribe("Downward API", func() {
It("should create a pod that prints his name and namespace", func() {
mkpath := func(file string) string {
path := filepath.Join("test/fixtures/doc-yaml/user-guide/downward-api", file)
framework.ExpectNoError(createFileForGoBinData(path, path))
return path
}
podYaml := mkpath("dapi-pod.yaml")
test := "test/fixtures/doc-yaml/user-guide/downward-api"
podYaml := readFile(test, "dapi-pod.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
podName := "dapi-test-pod"
By("creating the pod")
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, podYaml), nsFlag)
framework.RunKubectlOrDieInput(podYaml, "create", "-f", "-", nsFlag)
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred())
@ -492,125 +147,9 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())
})
})
framework.KubeDescribe("RethinkDB", func() {
It("should create and stop rethinkdb servers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/rethinkdb", file)
}
driverServiceYaml := mkpath("driver-service.yaml")
rethinkDbControllerYaml := mkpath("rc.yaml")
adminPodYaml := mkpath("admin-pod.yaml")
adminServiceYaml := mkpath("admin-service.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting rethinkdb")
framework.RunKubectlOrDie("create", "-f", driverServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"db": "rethinkdb"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
checkDbInstances := func() {
forEachPod("db", "rethinkdb", func(pod v1.Pod) {
_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
}
checkDbInstances()
err = framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
Expect(err).NotTo(HaveOccurred())
By("scaling rethinkdb")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "rethinkdb-rc", 2, true)
checkDbInstances()
By("starting admin")
framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
Expect(err).NotTo(HaveOccurred())
checkDbInstances()
content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
Expect(err).NotTo(HaveOccurred())
if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
framework.Failf("RethinkDB console is not running")
}
})
})
framework.KubeDescribe("Hazelcast", func() {
It("should create and scale hazelcast", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/hazelcast", file)
}
serviceYaml := mkpath("hazelcast-service.yaml")
deploymentYaml := mkpath("hazelcast-deployment.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting hazelcast")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", deploymentYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
err = framework.WaitForEndpoint(c, ns, "hazelcast")
Expect(err).NotTo(HaveOccurred())
By("scaling hazelcast")
framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, "hazelcast", 2, true)
forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
})
})
})
func makeHttpRequestToService(c clientset.Interface, ns, service, path string, timeout time.Duration) (string, error) {
var result []byte
var err error
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
if errProxy != nil {
break
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err = proxyRequest.Namespace(ns).
Context(ctx).
Name(service).
Suffix(path).
Do().
Raw()
if err != nil {
break
}
}
return string(result), err
}
func createFileForGoBinData(gobindataPath, outputFilename string) error {
data := generated.ReadOrDie(gobindataPath)
if len(data) == 0 {
return fmt.Errorf("Failed to read gobindata from %v", gobindataPath)
}
fullPath := filepath.Join(framework.TestContext.OutputDir, outputFilename)
err := os.MkdirAll(filepath.Dir(fullPath), 0777)
if err != nil {
return fmt.Errorf("Error while creating directory %v: %v", filepath.Dir(fullPath), err)
}
err = ioutil.WriteFile(fullPath, data, 0644)
if err != nil {
return fmt.Errorf("Error while trying to write to file %v: %v", fullPath, err)
}
return nil
func readFile(test, file string) string {
from := filepath.Join(test, file)
return string(testfiles.ReadOrDie(from, Fail))
}

View File

@ -1,10 +1,6 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
@ -12,14 +8,14 @@ go_library(
"authorizer_util.go",
"cleanup.go",
"crd_util.go",
"create.go",
"deployment_util.go",
"exec_util.go",
"firewall_util.go",
"flake_reporting_util.go",
"framework.go",
"get-kubemark-resource-usage.go",
"google_compute.go",
"gpu_util.go",
"ingress_utils.go",
"jobs_util.go",
"kubelet_stats.go",
"log_size_monitoring.go",
@ -29,6 +25,7 @@ go_library(
"perf_util.go",
"pods.go",
"profile_gatherer.go",
"provider.go",
"psp_util.go",
"pv_util.go",
"rc_util.go",
@ -52,107 +49,101 @@ go_library(
"//pkg/apis/extensions:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/apis/config:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/kubemark:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
"//pkg/ssh:go_default_library",
"//pkg/util/file:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/taints:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
"//staging/src/k8s.io/api/authorization/v1beta1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/client-go/discovery:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
"//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//staging/src/k8s.io/client-go/util/retry:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/types:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/github.com/spf13/viper:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/golang.org/x/net/websocket:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/google.golang.org/api/googleapi:go_default_library",
"//vendor/k8s.io/api/apps/v1:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/policy/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
"//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/cached:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/client-go/scale:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
"//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
@ -168,15 +159,18 @@ filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/framework/config:all-srcs",
"//test/e2e/framework/ginkgowrapper:all-srcs",
"//test/e2e/framework/ingress:all-srcs",
"//test/e2e/framework/metrics:all-srcs",
"//test/e2e/framework/podlogs:all-srcs",
"//test/e2e/framework/providers/aws:all-srcs",
"//test/e2e/framework/providers/azure:all-srcs",
"//test/e2e/framework/providers/gce:all-srcs",
"//test/e2e/framework/providers/kubemark:all-srcs",
"//test/e2e/framework/testfiles:all-srcs",
"//test/e2e/framework/timer:all-srcs",
"//test/e2e/framework/viperconfig:all-srcs",
],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["firewall_util_test.go"],
embed = [":go_default_library"],
)

View File

@ -17,7 +17,7 @@ limitations under the License.
package framework
import (
"fmt"
"k8s.io/klog"
"sync"
"time"
@ -62,7 +62,7 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
if apierrors.IsNotFound(err) {
fmt.Printf("SubjectAccessReview endpoint is missing\n")
klog.Info("SubjectAccessReview endpoint is missing")
time.Sleep(1 * time.Second)
return true, nil
}
@ -94,7 +94,7 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
klog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
}
}
@ -124,7 +124,7 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
klog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
}
}
@ -140,7 +140,7 @@ func IsRBACEnabled(f *Framework) bool {
Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false
} else if crs == nil || len(crs.Items) == 0 {
Logf("No ClusteRoles found; assuming RBAC is disabled.")
Logf("No ClusterRoles found; assuming RBAC is disabled.")
isRBACEnabled = false
} else {
Logf("Found ClusterRoles; assuming RBAC is enabled.")

View File

@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["config.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/config",
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["config_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,234 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config simplifies the declaration of configuration options.
// Right now the implementation maps them directly to command line
// flags. When combined with test/e2e/framework/viperconfig in a test suite,
// those flags can then also be read from a config file.
//
// Instead of defining flags one-by-one, developers annotate a
// structure with tags and then call a single function. This is the
// same approach as in https://godoc.org/github.com/jessevdk/go-flags,
// but implemented so that a test suite can continue to use the normal
// "flag" package.
//
// For example, a file storage/csi.go might define:
//
// var scaling struct {
// NumNodes int `default:"1" usage:"number of nodes to run on"`
// Master string
// }
// _ = config.AddOptions(&scaling, "storage.csi.scaling")
//
// This defines the following command line flags:
//
// -storage.csi.scaling.numNodes=<int> - number of nodes to run on (default: 1)
// -storage.csi.scaling.master=<string>
//
// All fields in the structure must be exported and have one of the following
// types (same as in the `flag` package):
// - bool
// - time.Duration
// - float64
// - string
// - int
// - int64
// - uint
// - uint64
// - and/or nested or embedded structures containing those basic types.
//
// Each basic entry may have a tag with these optional keys:
//
// usage: additional explanation of the option
// default: the default value, in the same format as it would
// be given on the command line and true/false for
// a boolean
//
// The names of the final configuration options are a combination of an
// optional common prefix for all options in the structure and the
// name of the fields, concatenated with a dot. To get names that are
// consistent with the command line flags defined by `ginkgo`, the
// initial character of each field name is converted to lower case.
//
// There is currently no support for aliases, so renaming the fields
// or the common prefix will be visible to users of the test suite and
// may break scripts which use the old names.
//
// The variable will be filled with the actual values by the test
// suite before running tests. Beware that the code which registers
// Ginkgo tests cannot use those config options, because registering
// tests and options both run before the E2E test suite handles
// parameters.
package config
import (
"flag"
"fmt"
"reflect"
"strconv"
"time"
"unicode"
"unicode/utf8"
)
// CommandLine is the flag set that AddOptions adds to. Usually this
// is the same as the default in the flag package, but can also be
// something else (for example during testing).
var CommandLine = flag.CommandLine
// AddOptions analyzes the options value and creates the necessary
// flags to populate it.
//
// The prefix can be used to root the options deeper in the overall
// set of options, with a dot separating different levels.
//
// The function always returns true, to enable this simplified
// registration of options:
// _ = AddOptions(...)
//
// It panics when it encounters an error, like unsupported types
// or option name conflicts.
func AddOptions(options interface{}, prefix string) bool {
optionsType := reflect.TypeOf(options)
if optionsType == nil {
panic("options parameter without a type - nil?!")
}
if optionsType.Kind() != reflect.Ptr || optionsType.Elem().Kind() != reflect.Struct {
panic(fmt.Sprintf("need a pointer to a struct, got instead: %T", options))
}
addStructFields(optionsType.Elem(), reflect.Indirect(reflect.ValueOf(options)), prefix)
return true
}
func addStructFields(structType reflect.Type, structValue reflect.Value, prefix string) {
for i := 0; i < structValue.NumField(); i++ {
entry := structValue.Field(i)
addr := entry.Addr()
structField := structType.Field(i)
name := structField.Name
r, n := utf8.DecodeRuneInString(name)
name = string(unicode.ToLower(r)) + name[n:]
usage := structField.Tag.Get("usage")
def := structField.Tag.Get("default")
if prefix != "" {
name = prefix + "." + name
}
if structField.PkgPath != "" {
panic(fmt.Sprintf("struct entry %q not exported", name))
}
ptr := addr.Interface()
if structField.Anonymous {
// Entries in embedded fields are treated like
// entries in the struct itself, i.e. we add
// them with the same prefix.
addStructFields(structField.Type, entry, prefix)
continue
}
if structField.Type.Kind() == reflect.Struct {
// Add nested options.
addStructFields(structField.Type, entry, name)
continue
}
// We could switch based on structField.Type. Doing a
// switch after getting an interface holding the
// pointer to the entry has the advantage that we
// immediately have something that we can add as flag
// variable.
//
// Perhaps generics will make this entire switch redundant someday...
switch ptr := ptr.(type) {
case *bool:
var defValue bool
parseDefault(&defValue, name, def)
CommandLine.BoolVar(ptr, name, defValue, usage)
case *time.Duration:
var defValue time.Duration
parseDefault(&defValue, name, def)
CommandLine.DurationVar(ptr, name, defValue, usage)
case *float64:
var defValue float64
parseDefault(&defValue, name, def)
CommandLine.Float64Var(ptr, name, defValue, usage)
case *string:
CommandLine.StringVar(ptr, name, def, usage)
case *int:
var defValue int
parseDefault(&defValue, name, def)
CommandLine.IntVar(ptr, name, defValue, usage)
case *int64:
var defValue int64
parseDefault(&defValue, name, def)
CommandLine.Int64Var(ptr, name, defValue, usage)
case *uint:
var defValue uint
parseDefault(&defValue, name, def)
CommandLine.UintVar(ptr, name, defValue, usage)
case *uint64:
var defValue uint64
parseDefault(&defValue, name, def)
CommandLine.Uint64Var(ptr, name, defValue, usage)
default:
panic(fmt.Sprintf("unsupported struct entry type %q: %T", name, entry.Interface()))
}
}
}
// parseDefault is necessary because "flag" wants the default in the
// actual type and cannot take a string. It would be nice to reuse the
// existing code for parsing from the "flag" package, but it isn't
// exported.
func parseDefault(value interface{}, name, def string) {
if def == "" {
return
}
checkErr := func(err error, value interface{}) {
if err != nil {
panic(fmt.Sprintf("invalid default %q for %T entry %s: %s", def, value, name, err))
}
}
switch value := value.(type) {
case *bool:
v, err := strconv.ParseBool(def)
checkErr(err, *value)
*value = v
case *time.Duration:
v, err := time.ParseDuration(def)
checkErr(err, *value)
*value = v
case *float64:
v, err := strconv.ParseFloat(def, 64)
checkErr(err, *value)
*value = v
case *int:
v, err := strconv.Atoi(def)
checkErr(err, *value)
*value = v
case *int64:
v, err := strconv.ParseInt(def, 0, 64)
checkErr(err, *value)
*value = v
case *uint:
v, err := strconv.ParseUint(def, 0, strconv.IntSize)
checkErr(err, *value)
*value = uint(v)
case *uint64:
v, err := strconv.ParseUint(def, 0, 64)
checkErr(err, *value)
*value = v
default:
panic(fmt.Sprintf("%q: setting defaults not supported for type %T", name, value))
}
}
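A minimal usage sketch for the config package above (not part of this commit); the package name, option prefix, and the Master field are illustrative assumptions that follow the package documentation.
package csiscaling

import "k8s.io/kubernetes/test/e2e/framework/config"

// scaling mirrors the example from the package documentation; the struct tags
// use the documented "default" and "usage" keys.
var scaling struct {
	NumNodes int    `default:"1" usage:"number of nodes to run on"`
	Master   string `usage:"name of the master node (assumed field)"`
}

// Registering at package init time defines -storage.csi.scaling.numNodes and
// -storage.csi.scaling.master before the test suite parses its command line.
var _ = config.AddOptions(&scaling, "storage.csi.scaling")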

View File

@ -0,0 +1,258 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"flag"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestInt(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
var context struct {
Number int `default:"5" usage:"some number"`
}
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "number",
usage: "some number",
defValue: "5",
}},
allFlags(CommandLine))
assert.Equal(t, 5, context.Number)
}
func TestLower(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
var context struct {
Ähem string
MixedCase string
}
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "mixedCase",
},
{
name: "ähem",
},
},
allFlags(CommandLine))
}
func TestPrefix(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
var context struct {
Number int `usage:"some number"`
}
require.NotPanics(t, func() {
AddOptions(&context, "some.prefix")
})
require.Equal(t, []simpleFlag{
{
name: "some.prefix.number",
usage: "some number",
defValue: "0",
}},
allFlags(CommandLine))
}
func TestRecursion(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
type Nested struct {
Number1 int `usage:"embedded number"`
}
var context struct {
Nested
A struct {
B struct {
C struct {
Number2 int `usage:"some number"`
}
}
}
}
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "a.b.c.number2",
usage: "some number",
defValue: "0",
},
{
name: "number1",
usage: "embedded number",
defValue: "0",
},
},
allFlags(CommandLine))
}
func TestPanics(t *testing.T) {
assert.PanicsWithValue(t, `invalid default "a" for int entry prefix.number: strconv.Atoi: parsing "a": invalid syntax`, func() {
var context struct {
Number int `default:"a"`
}
AddOptions(&context, "prefix")
})
assert.PanicsWithValue(t, `invalid default "10000000000000000000" for int entry prefix.number: strconv.Atoi: parsing "10000000000000000000": value out of range`, func() {
var context struct {
Number int `default:"10000000000000000000"`
}
AddOptions(&context, "prefix")
})
assert.PanicsWithValue(t, `options parameter without a type - nil?!`, func() {
AddOptions(nil, "")
})
assert.PanicsWithValue(t, `need a pointer to a struct, got instead: *int`, func() {
number := 0
AddOptions(&number, "")
})
assert.PanicsWithValue(t, `struct entry "prefix.number" not exported`, func() {
var context struct {
number int
}
AddOptions(&context, "prefix")
})
assert.PanicsWithValue(t, `unsupported struct entry type "prefix.someNumber": config.MyInt`, func() {
type MyInt int
var context struct {
SomeNumber MyInt
}
AddOptions(&context, "prefix")
})
}
func TestTypes(t *testing.T) {
CommandLine = flag.NewFlagSet("test", 0)
type Context struct {
Bool bool `default:"true"`
Duration time.Duration `default:"1ms"`
Float64 float64 `default:"1.23456789"`
String string `default:"hello world"`
Int int `default:"-1" usage:"some number"`
Int64 int64 `default:"-1234567890123456789"`
Uint uint `default:"1"`
Uint64 uint64 `default:"1234567890123456789"`
}
var context Context
require.NotPanics(t, func() {
AddOptions(&context, "")
})
require.Equal(t, []simpleFlag{
{
name: "bool",
defValue: "true",
isBool: true,
},
{
name: "duration",
defValue: "1ms",
},
{
name: "float64",
defValue: "1.23456789",
},
{
name: "int",
usage: "some number",
defValue: "-1",
},
{
name: "int64",
defValue: "-1234567890123456789",
},
{
name: "string",
defValue: "hello world",
},
{
name: "uint",
defValue: "1",
},
{
name: "uint64",
defValue: "1234567890123456789",
},
},
allFlags(CommandLine))
assert.Equal(t,
Context{true, time.Millisecond, 1.23456789, "hello world",
-1, -1234567890123456789, 1, 1234567890123456789,
},
context,
"default values must match")
require.NoError(t, CommandLine.Parse([]string{
"-int", "-2",
"-int64", "-9123456789012345678",
"-uint", "2",
"-uint64", "9123456789012345678",
"-string", "pong",
"-float64", "-1.23456789",
"-bool=false",
"-duration=1s",
}))
assert.Equal(t,
Context{false, time.Second, -1.23456789, "pong",
-2, -9123456789012345678, 2, 9123456789012345678,
},
context,
"parsed values must match")
}
func allFlags(fs *flag.FlagSet) []simpleFlag {
var flags []simpleFlag
fs.VisitAll(func(f *flag.Flag) {
s := simpleFlag{
name: f.Name,
usage: f.Usage,
defValue: f.DefValue,
}
type boolFlag interface {
flag.Value
IsBoolFlag() bool
}
if fv, ok := f.Value.(boolFlag); ok && fv.IsBoolFlag() {
s.isBool = true
}
flags = append(flags, s)
})
return flags
}
type simpleFlag struct {
name string
usage string
defValue string
isBool bool
}

View File

@ -21,7 +21,7 @@ import (
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/testserver"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
@ -35,25 +35,23 @@ type TestCrd struct {
Name string
Kind string
ApiGroup string
ApiVersion string
Versions []apiextensionsv1beta1.CustomResourceDefinitionVersion
ApiExtensionClient *crdclientset.Clientset
Crd *apiextensionsv1beta1.CustomResourceDefinition
DynamicClient dynamic.ResourceInterface
DynamicClients map[string]dynamic.ResourceInterface
CleanUp CleanCrdFn
}
// CreateTestCRD creates a new CRD specifically for the calling test.
func CreateTestCRD(f *Framework) (*TestCrd, error) {
func CreateMultiVersionTestCRD(f *Framework, group string, apiVersions []apiextensionsv1beta1.CustomResourceDefinitionVersion, conversionWebhook *apiextensionsv1beta1.WebhookClientConfig) (*TestCrd, error) {
suffix := randomSuffix()
name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, suffix)
kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, suffix)
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
apiVersion := "v1"
testcrd := &TestCrd{
Name: name,
Kind: kind,
ApiGroup: group,
ApiVersion: apiVersion,
Name: name,
Kind: kind,
ApiGroup: group,
Versions: apiVersions,
}
// Creating a custom resource definition for use by assorted tests.
@ -75,21 +73,33 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
crd := newCRDForTest(testcrd)
if conversionWebhook != nil {
crd.Spec.Conversion = &apiextensionsv1beta1.CustomResourceConversion{
Strategy: "Webhook",
WebhookClientConfig: conversionWebhook,
}
}
//create CRD and waits for the resource to be recognized and available.
crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
crd, err = fixtures.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
if err != nil {
Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
}
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}
resourceClient := dynamicClient.Resource(gvr).Namespace(f.Namespace.Name)
resourceClients := map[string]dynamic.ResourceInterface{}
for _, v := range crd.Spec.Versions {
if v.Served {
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: v.Name, Resource: crd.Spec.Names.Plural}
resourceClients[v.Name] = dynamicClient.Resource(gvr).Namespace(f.Namespace.Name)
}
}
testcrd.ApiExtensionClient = apiExtensionClient
testcrd.Crd = crd
testcrd.DynamicClient = resourceClient
testcrd.DynamicClients = resourceClients
testcrd.CleanUp = func() error {
err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient)
if err != nil {
Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
}
@ -98,13 +108,26 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
return testcrd, nil
}
// CreateTestCRD creates a new CRD specifically for the calling test.
func CreateTestCRD(f *Framework) (*TestCrd, error) {
group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
apiVersions := []apiextensionsv1beta1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
},
}
return CreateMultiVersionTestCRD(f, group, apiVersions, nil)
}
// newCRDForTest generates a CRD definition for the test
func newCRDForTest(testcrd *TestCrd) *apiextensionsv1beta1.CustomResourceDefinition {
return &apiextensionsv1beta1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{Name: testcrd.GetMetaName()},
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
Group: testcrd.ApiGroup,
Version: testcrd.ApiVersion,
Group: testcrd.ApiGroup,
Versions: testcrd.Versions,
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
Plural: testcrd.GetPluralName(),
Singular: testcrd.Name,
@ -130,3 +153,17 @@ func (c *TestCrd) GetPluralName() string {
func (c *TestCrd) GetListName() string {
return c.Name + "List"
}
func (c *TestCrd) GetAPIVersions() []string {
ret := []string{}
for _, v := range c.Versions {
if v.Served {
ret = append(ret, v.Name)
}
}
return ret
}
func (c *TestCrd) GetV1DynamicClient() dynamic.ResourceInterface {
return c.DynamicClients["v1"]
}
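A hedged sketch of how a test might call the new multi-version helper above; the calling package, API group, version list, and function name are illustrative assumptions, and no conversion webhook is configured.
package apimachinery

import (
	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// createExampleCRD shows one possible call pattern for CreateMultiVersionTestCRD.
// The returned TestCrd exposes one dynamic client per served version via
// DynamicClients, plus GetV1DynamicClient for the common case.
func createExampleCRD(f *framework.Framework) (*framework.TestCrd, error) {
	versions := []apiextensionsv1beta1.CustomResourceDefinitionVersion{
		{Name: "v1", Served: true, Storage: true},
		{Name: "v2", Served: true, Storage: false},
	}
	// nil means no conversion webhook is configured.
	return framework.CreateMultiVersionTestCRD(f, "stable.example.com", versions, nil)
}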

603 vendor/k8s.io/kubernetes/test/e2e/framework/create.go generated vendored Normal file
View File

@ -0,0 +1,603 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"encoding/json"
"fmt"
"github.com/pkg/errors"
apps "k8s.io/api/apps/v1"
"k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
storage "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
// LoadFromManifests loads .yaml or .json manifest files and returns
// all items that it finds in them. It supports all items for which
// there is a factory registered in Factories and .yaml files with
// multiple items separated by "---". Files are accessed via the
// "testfiles" package, which means they can come from a file system
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
// and silently ignored
// - the latest stable API version for each item is used, regardless of what
// is specified in the manifest files
func (f *Framework) LoadFromManifests(files ...string) ([]interface{}, error) {
var items []interface{}
err := visitManifests(func(data []byte) error {
// Ignore any additional fields for now, just determine what we have.
var what What
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, &what); err != nil {
return errors.Wrap(err, "decode TypeMeta")
}
factory := Factories[what]
if factory == nil {
return errors.Errorf("item of type %+v not supported", what)
}
object := factory.New()
if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, object); err != nil {
return errors.Wrapf(err, "decode %+v", what)
}
items = append(items, object)
return nil
}, files...)
return items, err
}
func visitManifests(cb func([]byte) error, files ...string) error {
for _, fileName := range files {
data, err := testfiles.Read(fileName)
if err != nil {
Failf("reading manifest file: %v", err)
}
// Split at the "---" separator before working on
// individual item. Only works for .yaml.
//
// We need to split ourselves because we need access
// to each original chunk of data for
// runtime.DecodeInto. kubectl has its own
// infrastructure for this, but that is a lot of code
// with many dependencies.
items := bytes.Split(data, []byte("\n---"))
for _, item := range items {
if err := cb(item); err != nil {
return errors.Wrap(err, fileName)
}
}
}
return nil
}
// PatchItems modifies the given items in place such that each test
// gets its own instances, to avoid conflicts between different tests
// and between tests and normal deployments.
//
// This is done by:
// - creating namespaced items inside the test's namespace
// - changing the name of non-namespaced items like ClusterRole
//
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func (f *Framework) PatchItems(items ...interface{}) error {
for _, item := range items {
// Uncomment when debugging the loading and patching of items.
// Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
if err := f.patchItemRecursively(item); err != nil {
return err
}
}
return nil
}
// CreateItems creates the items. Each of them must be an API object
// of a type that is registered in Factory.
//
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
// usually in an AfterEach
// - the test suite terminates, potentially after
// skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// CreateItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
var destructors []func() error
var cleanupHandle CleanupActionHandle
cleanup := func() {
if cleanupHandle == nil {
// Already done.
return
}
RemoveCleanupAction(cleanupHandle)
// TODO (?): use same logic as framework.go for determining
// whether we are expected to clean up? This would change the
// meaning of the -delete-namespace and -delete-namespace-on-failure
// command line flags, because they would also start to apply
// to non-namespaced items.
for _, destructor := range destructors {
if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
Logf("deleting failed: %s", err)
}
}
}
cleanupHandle = AddCleanupAction(cleanup)
var result error
for _, item := range items {
// Each factory knows which item(s) it supports, so try each one.
done := false
description := DescribeItem(item)
// Uncomment this line to get a full dump of the entire item.
// description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
Logf("creating %s", description)
for _, factory := range Factories {
destructor, err := factory.Create(f, item)
if destructor != nil {
destructors = append(destructors, func() error {
Logf("deleting %s", description)
return destructor()
})
}
if err == nil {
done = true
break
} else if errors.Cause(err) != ItemNotSupported {
result = err
break
}
}
if result == nil && !done {
result = errors.Errorf("item of type %T not supported", item)
break
}
}
if result != nil {
cleanup()
return nil, result
}
return cleanup, nil
}
// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func (f *Framework) CreateFromManifests(patch func(item interface{}) error, files ...string) (func(), error) {
items, err := f.LoadFromManifests(files...)
if err != nil {
return nil, errors.Wrap(err, "CreateFromManifests")
}
if err := f.PatchItems(items...); err != nil {
return nil, err
}
if patch != nil {
for _, item := range items {
if err := patch(item); err != nil {
return nil, err
}
}
}
return f.CreateItems(items...)
}
// What is a subset of metav1.TypeMeta which (in contrast to
// metav1.TypeMeta itself) satisfies the runtime.Object interface.
type What struct {
Kind string `json:"kind"`
}
func (in *What) DeepCopy() *What {
return &What{Kind: in.Kind}
}
func (in *What) DeepCopyInto(out *What) {
out.Kind = in.Kind
}
func (in *What) DeepCopyObject() runtime.Object {
return &What{Kind: in.Kind}
}
func (in *What) GetObjectKind() schema.ObjectKind {
return nil
}
// ItemFactory provides support for creating one particular item.
// The type gets exported because other packages might want to
// extend the set of pre-defined factories.
type ItemFactory interface {
// New returns a new empty item.
New() runtime.Object
// Create is responsible for creating the item. It returns an
// error or a cleanup function for the created item.
// If the item is of an unsupported type, it must return
// an error that has ItemNotSupported as cause.
Create(f *Framework, item interface{}) (func() error, error)
}
// DescribeItem always returns a string that describes the item,
// usually by calling out to cache.MetaNamespaceKeyFunc which
// concatenates namespace (if set) and name. If that fails, the entire
// item gets converted to a string.
func DescribeItem(item interface{}) string {
key, err := cache.MetaNamespaceKeyFunc(item)
if err == nil && key != "" {
return fmt.Sprintf("%T: %s", item, key)
}
return fmt.Sprintf("%T: %s", item, item)
}
// ItemNotSupported is the error that Create methods
// must return or wrap when they don't support the given item.
var ItemNotSupported = errors.New("not supported")
var Factories = map[What]ItemFactory{
{"ClusterRole"}: &clusterRoleFactory{},
{"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
{"DaemonSet"}: &daemonSetFactory{},
{"Role"}: &roleFactory{},
{"RoleBinding"}: &roleBindingFactory{},
{"Secret"}: &secretFactory{},
{"Service"}: &serviceFactory{},
{"ServiceAccount"}: &serviceAccountFactory{},
{"StatefulSet"}: &statefulSetFactory{},
{"StorageClass"}: &storageClassFactory{},
}
// PatchName makes the name of some item unique by appending the
// generated unique name.
func (f *Framework) PatchName(item *string) {
if *item != "" {
*item = *item + "-" + f.UniqueName
}
}
// PatchNamespace moves the item into the test's namespace. Not
// all items can be namespaced. For those, the name also needs to be
// patched.
func (f *Framework) PatchNamespace(item *string) {
if f.Namespace != nil {
*item = f.Namespace.GetName()
}
}
func (f *Framework) patchItemRecursively(item interface{}) error {
switch item := item.(type) {
case *rbac.Subject:
f.PatchNamespace(&item.Namespace)
case *rbac.RoleRef:
// TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
// which contains all role names that are defined cluster-wide before the test starts?
// All those names are exempt from renaming. That list could be populated by querying
// and get extended by tests.
if item.Name != "e2e-test-privileged-psp" {
f.PatchName(&item.Name)
}
case *rbac.ClusterRole:
f.PatchName(&item.Name)
case *rbac.Role:
f.PatchNamespace(&item.Namespace)
// Roles are namespaced, but because the RoleRef above always gets
// renamed (we cannot tell whether it references a ClusterRole or a
// Role), we have to rename here as well.
f.PatchName(&item.Name)
case *storage.StorageClass:
f.PatchName(&item.Name)
case *v1.ServiceAccount:
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *v1.Secret:
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *rbac.ClusterRoleBinding:
f.PatchName(&item.Name)
for i := range item.Subjects {
if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
}
}
if err := f.patchItemRecursively(&item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
}
case *rbac.RoleBinding:
f.PatchNamespace(&item.Namespace)
for i := range item.Subjects {
if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
return errors.Wrapf(err, "%T", f)
}
}
if err := f.patchItemRecursively(&item.RoleRef); err != nil {
return errors.Wrapf(err, "%T", f)
}
case *v1.Service:
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *apps.StatefulSet:
f.PatchNamespace(&item.ObjectMeta.Namespace)
case *apps.DaemonSet:
f.PatchNamespace(&item.ObjectMeta.Namespace)
default:
return errors.Errorf("missing support for patching item of type %T", item)
}
return nil
}
// The individual factories all follow the same template, but with
// enough differences in types and functions that copy-and-paste
// looked like the least dirty approach. Perhaps one day Go will have
// generics.
type serviceAccountFactory struct{}
func (f *serviceAccountFactory) New() runtime.Object {
return &v1.ServiceAccount{}
}
func (*serviceAccountFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*v1.ServiceAccount)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create ServiceAccount")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type clusterRoleFactory struct{}
func (f *clusterRoleFactory) New() runtime.Object {
return &rbac.ClusterRole{}
}
func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.ClusterRole)
if !ok {
return nil, ItemNotSupported
}
// Impersonation is required for Kubernetes < 1.12, see
// https://github.com/kubernetes/kubernetes/issues/62237#issuecomment-429315111
//
// This code is kept even for more recent Kubernetes, because users of
// the framework outside of Kubernetes might run against an older version
// of Kubernetes. It will be deprecated eventually.
//
// TODO: is this only needed for a ClusterRole or also for other non-namespaced
// items?
Logf("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := LoadConfig()
ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
ExpectNoError(err, "create superuser clientset")
client := superuserClientset.RbacV1().ClusterRoles()
if _, err = client.Create(item); err != nil {
return nil, errors.Wrap(err, "create ClusterRole")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type clusterRoleBindingFactory struct{}
func (f *clusterRoleBindingFactory) New() runtime.Object {
return &rbac.ClusterRoleBinding{}
}
func (*clusterRoleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.ClusterRoleBinding)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.RbacV1().ClusterRoleBindings()
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create ClusterRoleBinding")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type roleFactory struct{}
func (f *roleFactory) New() runtime.Object {
return &rbac.Role{}
}
func (*roleFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.Role)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create Role")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type roleBindingFactory struct{}
func (f *roleBindingFactory) New() runtime.Object {
return &rbac.RoleBinding{}
}
func (*roleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*rbac.RoleBinding)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create RoleBinding")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type serviceFactory struct{}
func (f *serviceFactory) New() runtime.Object {
return &v1.Service{}
}
func (*serviceFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*v1.Service)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.CoreV1().Services(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create Service")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type statefulSetFactory struct{}
func (f *statefulSetFactory) New() runtime.Object {
return &apps.StatefulSet{}
}
func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*apps.StatefulSet)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create StatefulSet")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type daemonSetFactory struct{}
func (f *daemonSetFactory) New() runtime.Object {
return &apps.DaemonSet{}
}
func (*daemonSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*apps.DaemonSet)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create DaemonSet")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type storageClassFactory struct{}
func (f *storageClassFactory) New() runtime.Object {
return &storage.StorageClass{}
}
func (*storageClassFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*storage.StorageClass)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.StorageV1().StorageClasses()
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create StorageClass")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
type secretFactory struct{}
func (f *secretFactory) New() runtime.Object {
return &v1.Secret{}
}
func (*secretFactory) Create(f *Framework, i interface{}) (func() error, error) {
item, ok := i.(*v1.Secret)
if !ok {
return nil, ItemNotSupported
}
client := f.ClientSet.CoreV1().Secrets(f.Namespace.GetName())
if _, err := client.Create(item); err != nil {
return nil, errors.Wrap(err, "create Secret")
}
return func() error {
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
}, nil
}
// PrettyPrint returns a human-readable representation of an item.
func PrettyPrint(item interface{}) string {
data, err := json.MarshalIndent(item, "", " ")
if err == nil {
return string(data)
}
return fmt.Sprintf("%+v", item)
}
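A hedged sketch of how a test outside the framework package could drive the loaders above end to end; the calling package, manifest path, image override, and function name are illustrative assumptions, not part of this commit.
package storage

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deployExampleManifests loads, patches, and creates the items from one
// manifest file, returning the cleanup function from CreateItems.
func deployExampleManifests(f *framework.Framework) (func(), error) {
	return f.CreateFromManifests(func(item interface{}) error {
		// Optional per-test patch, applied after PatchItems has adjusted
		// names and namespaces.
		if ss, ok := item.(*appsv1.StatefulSet); ok {
			ss.Spec.Template.Spec.Containers[0].Image = "k8s.gcr.io/pause:3.1"
		}
		return nil
	}, "test/e2e/testing-manifests/storage-csi/controller.yaml")
}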

View File

@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"time"
@ -30,9 +31,11 @@ import (
"k8s.io/apimachinery/pkg/watch"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
watchtools "k8s.io/client-go/tools/watch"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
@ -172,7 +175,9 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
d.Generation <= d.Status.ObservedGeneration, nil
}
_, err = watch.Until(2*time.Minute, w, condition)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
_, err = watchtools.UntilWithoutRetry(ctx, w, condition)
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
}
@ -255,7 +260,7 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
Containers: []v1.Container{
{
Name: "write-pod",
Image: "busybox",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
SecurityContext: &v1.SecurityContext{

View File

@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"sync"
)
type FlakeReport struct {
lock sync.RWMutex
Flakes []string `json:"flakes"`
FlakeCount int `json:"flakeCount"`
}
func NewFlakeReport() *FlakeReport {
return &FlakeReport{
Flakes: []string{},
}
}
func buildDescription(optionalDescription ...interface{}) string {
switch len(optionalDescription) {
case 0:
return ""
default:
return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...)
}
}
// RecordFlakeIfError records the error (if non-nil) as a flake along with an optional description.
// This can be used as a replacement for framework.ExpectNoError() for non-critical errors that can
// be considered 'flakes', to avoid causing failures in tests.
func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
if err == nil {
return
}
msg := fmt.Sprintf("Unexpected error occurred: %v", err)
desc := buildDescription(optionalDescription...)
if desc != "" {
msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
}
Logf(msg)
f.lock.Lock()
defer f.lock.Unlock()
f.Flakes = append(f.Flakes, msg)
f.FlakeCount++
}
func (f *FlakeReport) GetFlakeCount() int {
f.lock.RLock()
defer f.lock.RUnlock()
return f.FlakeCount
}
func (f *FlakeReport) PrintHumanReadable() string {
f.lock.RLock()
defer f.lock.RUnlock()
buf := bytes.Buffer{}
buf.WriteString(fmt.Sprintf("FlakeCount: %v\n", f.FlakeCount))
buf.WriteString("Flakes:\n")
for _, flake := range f.Flakes {
buf.WriteString(fmt.Sprintf("%v\n", flake))
}
return buf.String()
}
func (f *FlakeReport) PrintJSON() string {
f.lock.RLock()
defer f.lock.RUnlock()
return PrettyPrintJSON(f)
}
func (f *FlakeReport) SummaryKind() string {
return "FlakeReport"
}
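A hedged sketch of downgrading a non-critical error to a flake with the new report; the calling package, helper name, and pod name are illustrative assumptions. It uses the Framework.RecordFlakeIfError wrapper added in framework.go below.
package lifecycle

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// cleanupHelperPod records a failed delete as a flake instead of failing the test.
func cleanupHelperPod(f *framework.Framework, podName string) {
	err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, &metav1.DeleteOptions{})
	f.RecordFlakeIfError(err, "deleting helper pod %s", podName)
}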

View File

@ -14,19 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package framework contains provider-independent helper code for
// building and running E2E tests with Ginkgo. The actual Ginkgo test
// suites get assembled by combining this framework, the optional
// provider support code and specific tests via a separate .go file
// like Kubernetes' test/e2e.go.
package framework
import (
"bufio"
"bytes"
"fmt"
"math/rand"
"os"
"path"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -36,16 +42,14 @@ import (
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
scaleclient "k8s.io/client-go/scale"
"k8s.io/client-go/tools/clientcmd"
csi "k8s.io/csi-api/pkg/client/clientset/versioned"
aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubemark"
"k8s.io/kubernetes/test/e2e/framework/metrics"
testutils "k8s.io/kubernetes/test/utils"
@ -65,8 +69,15 @@ const (
type Framework struct {
BaseName string
// Set together with creating the ClientSet and the namespace.
// Guaranteed to be unique in the cluster even when running the same
// test multiple times in parallel.
UniqueName string
ClientSet clientset.Interface
KubemarkExternalClusterClientSet clientset.Interface
APIExtensionsClientSet apiextensionsclient.Interface
CSIClientSet csi.Interface
InternalClientset *internalclientset.Clientset
AggregatorClient *aggregatorclient.Clientset
@ -90,6 +101,9 @@ type Framework struct {
logsSizeCloseChannel chan bool
logsSizeVerifier *LogsSizeVerifier
// Flaky operation failures in an e2e test can be captured through this.
flakeReport *FlakeReport
// To make sure that this framework cleans up after itself, no matter what,
// we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all Cleanup actions.
@ -102,8 +116,6 @@ type Framework struct {
// or stdout if ReportDir is not set once test ends.
TestSummaries []TestDataSummary
kubemarkControllerCloseChannel chan struct{}
// Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
clusterAutoscalerMetricsBeforeTest metrics.MetricsCollection
}
@ -152,6 +164,15 @@ func (f *Framework) BeforeEach() {
if f.ClientSet == nil {
By("Creating a kubernetes client")
config, err := LoadConfig()
testDesc := CurrentGinkgoTestDescription()
if len(testDesc.ComponentTexts) > 0 {
componentTexts := strings.Join(testDesc.ComponentTexts, " ")
config.UserAgent = fmt.Sprintf(
"%v -- %v",
rest.DefaultKubernetesUserAgent(),
componentTexts)
}
Expect(err).NotTo(HaveOccurred())
config.QPS = f.Options.ClientQPS
config.Burst = f.Options.ClientBurst
@ -163,12 +184,19 @@ func (f *Framework) BeforeEach() {
}
f.ClientSet, err = clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.InternalClientset, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
f.DynamicClient, err = dynamic.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
// csi.storage.k8s.io is based on CRD, which is served only as JSON
jsonConfig := config
jsonConfig.ContentType = "application/json"
f.CSIClientSet, err = csi.NewForConfig(jsonConfig)
Expect(err).NotTo(HaveOccurred())
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
@ -188,29 +216,11 @@ func (f *Framework) BeforeEach() {
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
externalConfig.QPS = f.Options.ClientQPS
externalConfig.Burst = f.Options.ClientBurst
Expect(err).NotTo(HaveOccurred())
externalClient, err := clientset.NewForConfig(externalConfig)
Expect(err).NotTo(HaveOccurred())
f.KubemarkExternalClusterClientSet = externalClient
f.kubemarkControllerCloseChannel = make(chan struct{})
externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(f.KubemarkExternalClusterClientSet, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
Expect(err).NotTo(HaveOccurred())
externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
Expect(TestContext.CloudConfig.KubemarkController.WaitForCacheSync(f.kubemarkControllerCloseChannel)).To(BeTrue())
go TestContext.CloudConfig.KubemarkController.Run(f.kubemarkControllerCloseChannel)
}
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
}
if !f.SkipNamespaceCreation {
By("Building a namespace api object")
By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
"e2e-framework": f.BaseName,
})
@ -225,13 +235,27 @@ func (f *Framework) BeforeEach() {
} else {
Logf("Skipping waiting for service account")
}
f.UniqueName = f.Namespace.GetName()
} else {
// not guaranteed to be unique, but very likely
f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
var err error
var nodeMode NodesSet
switch TestContext.GatherKubeSystemResourceUsageData {
case "master":
nodeMode = MasterNodes
case "masteranddns":
nodeMode = MasterAndDNSNodes
default:
nodeMode = AllNodes
}
f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
InKubemark: ProviderIs("kubemark"),
MasterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
Nodes: nodeMode,
ResourceDataGatheringPeriod: 60 * time.Second,
ProbeDuration: 15 * time.Second,
PrintVerboseLogs: false,
@ -269,6 +293,8 @@ func (f *Framework) BeforeEach() {
}
}
f.flakeReport = NewFlakeReport()
}
// AfterEach deletes the namespace, after reading its events.
@ -322,29 +348,10 @@ func (f *Framework) AfterEach() {
// Print events if the test failed.
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
// Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
// Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client.
if !f.SkipNamespaceCreation {
DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
}
logFunc := Logf
if TestContext.ReportDir != "" {
filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
file, err := os.Create(filePath)
if err != nil {
By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
} else {
By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
defer file.Close()
if err = file.Chmod(0644); err != nil {
Logf("Failed to chmod to 644 of %v: %v", filePath, err)
}
logFunc = GetLogToFileFunc(file)
}
} else {
By("Dumping a list of prepulled images on each node...")
}
LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
@ -378,8 +385,12 @@ func (f *Framework) AfterEach() {
}
}
if TestContext.CloudConfig.KubemarkController != nil {
close(f.kubemarkControllerCloseChannel)
TestContext.CloudConfig.Provider.FrameworkAfterEach(f)
// Report any flakes that were observed in the e2e test and reset.
if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 {
f.TestSummaries = append(f.TestSummaries, f.flakeReport)
f.flakeReport = nil
}
PrintSummaries(f.TestSummaries, f.BaseName)
@ -409,6 +420,10 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
return ns, err
}
func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
f.flakeReport.RecordFlakeIfError(err, optionalDescription...)
}
// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
// completes.
func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
@ -533,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
return nil
} else {
return []v1.ServicePort{{
Protocol: "TCP",
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
}}
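A hedged sketch of the intended use of the new UniqueName field: naming cluster-scoped objects per test so parallel runs do not collide. The calling package, storage class name, and provisioner are illustrative assumptions.
package storage

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// newTestStorageClass names a cluster-scoped object after the per-test
// unique name, which is guaranteed to be unique when a namespace is created.
func newTestStorageClass(f *framework.Framework) *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "fast-" + f.UniqueName},
		Provisioner: "kubernetes.io/gce-pd",
	}
}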

View File

@ -22,6 +22,7 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
. "github.com/onsi/gomega"
"k8s.io/klog"
)
const (
@ -50,13 +51,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
}
// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
func NVIDIADevicePlugin(ns string) *v1.Pod {
func NVIDIADevicePlugin() *v1.Pod {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
Expect(err).NotTo(HaveOccurred())
p := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
Namespace: ns,
Namespace: metav1.NamespaceSystem,
},
Spec: ds.Spec.Template.Spec,
@ -69,7 +70,16 @@ func NVIDIADevicePlugin(ns string) *v1.Pod {
func GetGPUDevicePluginImage() string {
ds, err := DsFromManifest(GPUDevicePluginDSYAML)
if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
if err != nil {
klog.Errorf("Failed to parse the device plugin image: %v", err)
return ""
}
if ds == nil {
klog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil")
return ""
}
if len(ds.Spec.Template.Spec.Containers) < 1 {
klog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML")
return ""
}
return ds.Spec.Template.Spec.Containers[0].Image

View File

@ -0,0 +1,42 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["ingress_utils.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/ingress",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
jobutil "k8s.io/kubernetes/pkg/controller/job"
)
const (
@ -181,8 +182,8 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
})
}
// WaitForJobFinish uses c to wait for compeletions to complete for the Job jobName in namespace ns.
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
// WaitForJobComplete uses c to wait for the Job jobName in namespace ns to reach the given number of completions.
func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
@ -192,6 +193,17 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int
})
}
// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return jobutil.IsJobFinished(curr), nil
})
}
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
return wait.Poll(Poll, timeout, func() (bool, error) {
@ -239,6 +251,18 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
return count == parallelism, nil
}
// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
// to be deleted.
func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
pods, err := GetJobPods(c, ns, jobName)
if err != nil {
return false, err
}
return len(pods.Items) == 0, nil
})
}
func newBool(val bool) *bool {
p := new(bool)
*p = val
@ -250,7 +274,7 @@ type updateJobFunc func(*batch.Job)
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.BatchV1().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
pollErr := wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
return false, err
}
@ -268,3 +292,25 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
}
return job, pollErr
}
// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
// a non-nil deletionTimestamp (i.e. being deleted).
func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return curr.ObjectMeta.DeletionTimestamp != nil, nil
})
}
func JobFinishTime(finishedJob *batch.Job) metav1.Time {
var finishTime metav1.Time
for _, c := range finishedJob.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return c.LastTransitionTime
}
}
return finishTime
}
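A hedged sketch combining the new Job wait helpers above; the calling package, job name, and function name are illustrative assumptions. WaitForAllJobPodsGone would typically be called after the test deletes the Job.
package apps

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForExampleJob waits for a Job to complete and then to be marked finished.
func waitForExampleJob(c clientset.Interface, ns string) error {
	// Wait until the job reports three successful completions.
	if err := framework.WaitForJobComplete(c, ns, "example-job", 3); err != nil {
		return err
	}
	// Wait until the job is marked finished (either Complete or Failed).
	return framework.WaitForJobFinish(c, ns, "example-job")
}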

View File

@ -492,7 +492,7 @@ type usageDataPerContainer struct {
}
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
if err != nil {
return "", err
}

View File

@ -21,12 +21,12 @@ go_library(
"//pkg/apis/core:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/util/system:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/prometheus/common/expfmt:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -22,9 +22,9 @@ import (
"reflect"
"strings"
"github.com/golang/glog"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"k8s.io/klog"
)
type Metrics map[string]model.Samples
@ -88,7 +88,7 @@ func parseMetrics(data string, output *Metrics) error {
// Expected loop termination condition.
return nil
}
glog.Warningf("Invalid Decode. Skipping.")
klog.Warningf("Invalid Decode. Skipping.")
continue
}
for _, metric := range v {

View File

@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util/system"
"github.com/golang/glog"
"k8s.io/klog"
)
const (
@ -62,7 +62,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
return nil, err
}
if len(nodeList.Items) < 1 {
glog.Warning("Can't find any Nodes in the API server to grab metrics from")
klog.Warning("Can't find any Nodes in the API server to grab metrics from")
}
for _, node := range nodeList.Items {
if system.IsMasterNode(node.Name) {
@ -76,9 +76,9 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
controllers = false
clusterAutoscaler = ec != nil
if clusterAutoscaler {
glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
} else {
glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
}
}
@ -127,7 +127,7 @@ func (g *MetricsGrabber) GrabFromScheduler() (SchedulerMetrics, error) {
if !g.registeredMaster {
return SchedulerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping Scheduler's metrics gathering.")
}
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.SchedulerPort)
output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.InsecureSchedulerPort)
if err != nil {
return SchedulerMetrics{}, err
}

View File

@ -23,9 +23,11 @@ import (
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -232,6 +234,143 @@ func (l *SchedulingMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}
type Histogram struct {
Labels map[string]string `json:"labels"`
Buckets map[string]int `json:"buckets"`
}
type HistogramVec []Histogram
func newHistogram(labels map[string]string) *Histogram {
return &Histogram{
Labels: labels,
Buckets: make(map[string]int),
}
}
type EtcdMetrics struct {
BackendCommitDuration HistogramVec `json:"backendCommitDuration"`
SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
PeerRoundTripTime HistogramVec `json:"peerRoundTripTime"`
WalFsyncDuration HistogramVec `json:"walFsyncDuration"`
MaxDatabaseSize float64 `json:"maxDatabaseSize"`
}
func newEtcdMetrics() *EtcdMetrics {
return &EtcdMetrics{
BackendCommitDuration: make(HistogramVec, 0),
SnapshotSaveTotalDuration: make(HistogramVec, 0),
PeerRoundTripTime: make(HistogramVec, 0),
WalFsyncDuration: make(HistogramVec, 0),
}
}
func (l *EtcdMetrics) SummaryKind() string {
return "EtcdMetrics"
}
func (l *EtcdMetrics) PrintHumanReadable() string {
return PrettyPrintJSON(l)
}
func (l *EtcdMetrics) PrintJSON() string {
return PrettyPrintJSON(l)
}
type EtcdMetricsCollector struct {
stopCh chan struct{}
wg *sync.WaitGroup
metrics *EtcdMetrics
}
func NewEtcdMetricsCollector() *EtcdMetricsCollector {
return &EtcdMetricsCollector{
stopCh: make(chan struct{}),
wg: &sync.WaitGroup{},
metrics: newEtcdMetrics(),
}
}
func getEtcdMetrics() ([]*model.Sample, error) {
// Etcd is exposed only on localhost, so we reach its metrics endpoint over SSH.
if TestContext.Provider == "gke" {
Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
return nil, nil
}
cmd := "curl http://localhost:2379/metrics"
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || sshResult.Code != 0 {
return nil, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
}
data := sshResult.Stdout
return extractMetricSamples(data)
}
func getEtcdDatabaseSize() (float64, error) {
samples, err := getEtcdMetrics()
if err != nil {
return 0, err
}
for _, sample := range samples {
if sample.Metric[model.MetricNameLabel] == "etcd_debugging_mvcc_db_total_size_in_bytes" {
return float64(sample.Value), nil
}
}
return 0, fmt.Errorf("Couldn't find etcd database size metric")
}
// StartCollecting starts collecting the etcd database size metric periodically
// and updates MaxDatabaseSize accordingly.
func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
mc.wg.Add(1)
go func() {
defer mc.wg.Done()
for {
select {
case <-time.After(interval):
dbSize, err := getEtcdDatabaseSize()
if err != nil {
Logf("Failed to collect etcd database size")
continue
}
mc.metrics.MaxDatabaseSize = math.Max(mc.metrics.MaxDatabaseSize, dbSize)
case <-mc.stopCh:
return
}
}
}()
}
func (mc *EtcdMetricsCollector) StopAndSummarize() error {
close(mc.stopCh)
mc.wg.Wait()
// Do some one-off collection of metrics.
samples, err := getEtcdMetrics()
if err != nil {
return err
}
for _, sample := range samples {
switch sample.Metric[model.MetricNameLabel] {
case "etcd_disk_backend_commit_duration_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.BackendCommitDuration)
case "etcd_debugging_snap_save_total_duration_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.SnapshotSaveTotalDuration)
case "etcd_disk_wal_fsync_duration_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.WalFsyncDuration)
case "etcd_network_peer_round_trip_time_seconds_bucket":
convertSampleToBucket(sample, &mc.metrics.PeerRoundTripTime)
}
}
return nil
}
func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
return mc.metrics
}
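A minimal hypothetical sketch of how a test might drive the new EtcdMetricsCollector; the 30-second interval and the runLoad callback are assumptions.
// Hypothetical usage of EtcdMetricsCollector; runLoad stands in for the workload under test.
func exampleCollectEtcdMetrics(runLoad func()) (*EtcdMetrics, error) {
	collector := NewEtcdMetricsCollector()
	// Sample the etcd database size every 30 seconds while the workload runs.
	collector.StartCollecting(30 * time.Second)
	runLoad()
	// Stop sampling and do the one-off histogram collection.
	if err := collector.StopAndSummarize(); err != nil {
		return nil, err
	}
	return collector.GetMetrics(), nil
}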
type SaturationTime struct {
TimeToSaturate time.Duration `json:"timeToSaturate"`
NumberOfNodes int `json:"numberOfNodes"`
@ -472,7 +611,7 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
Context(ctx).
Namespace(metav1.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.InsecureSchedulerPort)).
SubResource("proxy").
Suffix("metrics").
Do().Raw()
@ -500,6 +639,9 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
result := SchedulingMetrics{}
data, err := sendRestRequestToScheduler(c, "GET")
if err != nil {
return nil, err
}
samples, err := extractMetricSamples(data)
if err != nil {
@ -546,12 +688,33 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {
func ResetSchedulerMetrics(c clientset.Interface) error {
responseText, err := sendRestRequestToScheduler(c, "DELETE")
if err != nil || responseText != "metrics reset\n" {
if err != nil {
return fmt.Errorf("Unexpected response: %q", responseText)
}
return nil
}
func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
labels := make(map[string]string)
for k, v := range sample.Metric {
if k != "le" {
labels[string(k)] = string(v)
}
}
var hist *Histogram
for i := range *h {
if reflect.DeepEqual(labels, (*h)[i].Labels) {
hist = &((*h)[i])
break
}
}
if hist == nil {
hist = newHistogram(labels)
*h = append(*h, *hist)
}
hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
}
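To make the bucket conversion concrete, a hypothetical sketch assuming it runs in the same package as convertSampleToBucket: the "le" label becomes the bucket key, and every other label identifies the histogram; the metric name and values below are made up.
// Hypothetical illustration of folding one Prometheus histogram sample into a HistogramVec.
func exampleConvertSample() {
	sample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: "etcd_disk_wal_fsync_duration_seconds_bucket",
			"le":                  "0.032",
		},
		Value: 42,
	}
	var hv HistogramVec
	convertSampleToBucket(sample, &hv)
	// hv now holds one Histogram whose Buckets map contains {"0.032": 42}.
	fmt.Println(hv[0].Buckets["0.032"])
}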
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil {

View File

@ -292,13 +292,13 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
if protocol == "udp" {
// TODO: It would be enough to pass 1s+epsilon to timeout, but unfortunately
// busybox timeout doesn't support non-integer values.
cmd = fmt.Sprintf("echo 'hostName' | timeout -t 2 nc -w 1 -u %s %d", targetIP, targetPort)
cmd = fmt.Sprintf("echo 'hostName' | nc -w 1 -u %s %d", targetIP, targetPort)
} else {
ipPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort))
// The current versions of curl included in CentOS and RHEL distros
// misinterpret square brackets around IPv6 as globbing, so use the -g
// argument to disable globbing to handle the IPv6 case.
cmd = fmt.Sprintf("timeout -t 15 curl -g -q -s --connect-timeout 1 http://%s/hostName", ipPort)
cmd = fmt.Sprintf("curl -g -q -s --max-time 15 --connect-timeout 1 http://%s/hostName", ipPort)
}
// TODO: This simply tells us that we can reach the endpoints. Check that
@ -948,8 +948,11 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
// This function executes commands on a node so it will work only for some
// environments.
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host := GetNodeExternalIP(node)
master := GetMasterAddress(c)
host, err := GetNodeExternalIP(node)
if err != nil {
Failf("Error getting node external ip : %v", err)
}
masterAddresses := GetAllMasterAddresses(c)
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
defer func() {
// This code will execute even if setting the iptables rule failed.
@ -957,14 +960,18 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
// had been inserted. (yes, we could look at the error code and ssh error
// separately, but I prefer to stay on the safe side).
By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
UnblockNetwork(host, master)
for _, masterAddress := range masterAddresses {
UnblockNetwork(host, masterAddress)
}
}()
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
BlockNetwork(host, master)
for _, masterAddress := range masterAddresses {
BlockNetwork(host, masterAddress)
}
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {

View File

@ -63,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
os.Environ(),
"TEST_ETCD_VERSION="+target_version,
"STORAGE_BACKEND="+target_storage,
"TEST_ETCD_IMAGE=3.2.18-0")
"TEST_ETCD_IMAGE=3.2.24-1")
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
return err
@ -103,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
env = append(env,
"TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
"STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
"TEST_ETCD_IMAGE=3.2.18-0")
"TEST_ETCD_IMAGE=3.2.24-1")
} else {
// In e2e tests, we skip the confirmation prompt about
// implicit etcd upgrades to simulate the user entering "y".

View File

@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["podlogs.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/podlogs",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

Some files were not shown because too many files have changed in this diff.