vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

62
vendor/k8s.io/kubernetes/test/e2e/upgrades/BUILD generated vendored Normal file

@@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"apparmor.go",
"cassandra.go",
"configmaps.go",
"etcd.go",
"horizontal_pod_autoscalers.go",
"ingress.go",
"kube_proxy_migration.go",
"mysql.go",
"secrets.go",
"services.go",
"sysctl.go",
"upgrade.go",
],
importpath = "k8s.io/kubernetes/test/e2e/upgrades",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/onsi/gomega/gstruct:go_default_library",
"//vendor/k8s.io/api/autoscaling/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/upgrades/apps:all-srcs",
"//test/e2e/upgrades/storage:all-srcs",
],
tags = ["automanaged"],
)

112
vendor/k8s.io/kubernetes/test/e2e/upgrades/apparmor.go generated vendored Normal file

@@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gstruct"
)
// AppArmorUpgradeTest tests that AppArmor profiles are enforced & usable across upgrades.
type AppArmorUpgradeTest struct {
pod *api.Pod
}
func (AppArmorUpgradeTest) Name() string { return "apparmor-upgrade" }
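// Skip returns true when this test can be skipped.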
func (AppArmorUpgradeTest) Skip(upgCtx UpgradeContext) bool {
supportedImages := make(map[string]bool)
for _, d := range common.AppArmorDistros {
supportedImages[d] = true
}
for _, vCtx := range upgCtx.Versions {
if !supportedImages[vCtx.NodeImage] {
return true
}
}
return false
}
// Setup loads AppArmor profiles onto the nodes, creates a long-running AppArmor-enabled pod, and verifies that AppArmor is enforced.
func (t *AppArmorUpgradeTest) Setup(f *framework.Framework) {
By("Loading AppArmor profiles to nodes")
common.LoadAppArmorProfiles(f)
// Create the initial test pod.
By("Creating a long-running AppArmor enabled pod.")
t.pod = common.CreateAppArmorTestPod(f, false, false)
// Verify initial state.
t.verifyNodesAppArmorEnabled(f)
t.verifyNewPodSucceeds(f)
}
// Test waits for the upgrade to complete, and then verifies that AppArmor
// is still enforced on the nodes and that new AppArmor pods can still be created.
func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
if upgrade == MasterUpgrade {
t.verifyPodStillUp(f)
}
t.verifyNodesAppArmorEnabled(f)
t.verifyNewPodSucceeds(f)
}
// Teardown cleans up any remaining resources.
func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
By("Logging container failures")
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
}
func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
By("Verifying an AppArmor profile is continuously enforced for a pod")
pod, err := f.PodClient().Get(t.pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Should be able to get pod")
Expect(pod.Status.Phase).To(Equal(api.PodRunning), "Pod should stay running")
Expect(pod.Status.ContainerStatuses[0].State.Running).NotTo(BeNil(), "Container should be running")
Expect(pod.Status.ContainerStatuses[0].RestartCount).To(BeZero(), "Container should not need to be restarted")
}
func (t *AppArmorUpgradeTest) verifyNewPodSucceeds(f *framework.Framework) {
By("Verifying an AppArmor profile is enforced for a new pod")
common.CreateAppArmorTestPod(f, false, true)
}
func (t *AppArmorUpgradeTest) verifyNodesAppArmorEnabled(f *framework.Framework) {
By("Verifying nodes are AppArmor enabled")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list nodes")
for _, node := range nodes.Items {
Expect(node.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
"Ready": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
"Message": ContainSubstring("AppArmor enabled"),
}),
}))
}
}
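// conditionType returns the type of the given NodeCondition as a string, for use as a gstruct element identifier.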
func conditionType(condition interface{}) string {
return string(condition.(api.NodeCondition).Type)
}
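Note: the shared harness types used above (the UpgradeType parameter of Test, the UpgradeContext consumed by Skip, and the MasterUpgrade constant) are defined in upgrade.go, which belongs to this package but is not reproduced in this excerpt. The following is only a sketch of what these files appear to implement, inferred from their usage above rather than copied from the vendored upgrade.go:

package upgrades

import (
    "k8s.io/kubernetes/pkg/util/version"
    "k8s.io/kubernetes/test/e2e/framework"
)

// UpgradeType (sketch) identifies which part of the cluster is being upgraded.
type UpgradeType int

// Only MasterUpgrade is referenced in the files shown here; other kinds are omitted.
const MasterUpgrade UpgradeType = iota

// VersionContext and UpgradeContext (sketch) are what the Skip methods consume.
type VersionContext struct {
    Version   version.Version
    NodeImage string
}

type UpgradeContext struct {
    Versions []VersionContext
}

// Test (sketch) is the contract each *UpgradeTest type in this package satisfies.
type Test interface {
    Name() string
    Setup(f *framework.Framework)
    Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
    Teardown(f *framework.Framework)
}

// Skippable (sketch) is the optional interface for tests that can opt out of a run.
type Skippable interface {
    Skip(upgCtx UpgradeContext) bool
}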

49
vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/BUILD generated vendored Normal file

@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"daemonsets.go",
"deployments.go",
"job.go",
"replicasets.go",
"statefulset.go",
],
importpath = "k8s.io/kubernetes/test/e2e/upgrades/apps",
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -0,0 +1,182 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
)
// DaemonSetUpgradeTest tests that a DaemonSet is running before and after
// a cluster upgrade.
type DaemonSetUpgradeTest struct {
daemonSet *extensions.DaemonSet
}
func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" }
// Setup creates a DaemonSet and verifies that it's running
func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
daemonSetName := "ds1"
labelSet := map[string]string{"ds-name": daemonSetName}
image := framework.ServeHostnameImage
ns := f.Namespace
t.daemonSet = &extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.Name,
Name: daemonSetName,
},
Spec: extensions.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labelSet,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: daemonSetName,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
}
By("Creating a DaemonSet")
var err error
if t.daemonSet, err = f.ClientSet.ExtensionsV1beta1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}
By("Waiting for DaemonSet pods to become ready")
err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
})
framework.ExpectNoError(err)
By("Validating the DaemonSet after creation")
t.validateRunningDaemonSet(f)
}
// Test waits until the upgrade has completed and then verifies that the DaemonSet
// is still running
func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for upgradet to complete before re-validating DaemonSet")
<-done
By("validating the DaemonSet is still running after upgrade")
t.validateRunningDaemonSet(f)
}
// Teardown cleans up any remaining resources.
func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
By("confirming the DaemonSet pods are running on all expected nodes")
res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
framework.ExpectNoError(err)
if !res {
framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
}
// DaemonSet resource itself should be good
By("confirming the DaemonSet resource is in a good state")
res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
framework.ExpectNoError(err)
if !res {
framework.Failf("expected DaemonSet to be in a good state, it was not")
}
}
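// checkRunningOnAllNodes returns true once exactly one DaemonSet pod is running on every schedulable (untainted) node.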
func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) {
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return false, err
}
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
if len(node.Spec.Taints) == 0 {
nodeNames = append(nodeNames, node.Name)
} else {
framework.Logf("Node %v not expected to have DaemonSet pod, has taints %v", node.Name, node.Spec.Taints)
}
}
return checkDaemonPodOnNodes(f, namespace, selector, nodeNames)
}
func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) {
selector := labels.Set(labelSet).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := f.ClientSet.Core().Pods(namespace).List(options)
if err != nil {
return false, err
}
pods := podList.Items
nodesToPodCount := make(map[string]int)
for _, pod := range pods {
if controller.IsPodActive(&pod) {
framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
nodesToPodCount[pod.Spec.NodeName]++
}
}
framework.Logf("nodesToPodCount: %v", nodesToPodCount)
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
for _, nodeName := range nodeNames {
if nodesToPodCount[nodeName] != 1 {
return false, nil
}
}
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
// other nodes.
return len(nodesToPodCount) == len(nodeNames), nil
}
func checkDaemonStatus(f *framework.Framework, namespace string, dsName string) (bool, error) {
ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(namespace).Get(dsName, metav1.GetOptions{})
if err != nil {
return false, err
}
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
return false, nil
}
return true, nil
}


@@ -0,0 +1,172 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
deploymentName = "dp"
)
// TODO: Test that the deployment stays available during master (and maybe
// node and cluster upgrades).
// DeploymentUpgradeTest tests that a deployment is using the same replica
// sets before and after a cluster upgrade.
type DeploymentUpgradeTest struct {
oldDeploymentUID types.UID
oldRSUID types.UID
newRSUID types.UID
}
func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" }
// Setup creates a deployment and makes sure it has a new and an old replicaset running.
func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet
nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, extensions.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 1 {
framework.ExpectNoError(fmt.Errorf("expected one replicaset, got %d", len(rss)))
}
t.oldRSUID = rss[0].UID
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
// Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss = rsList.Items
if len(rss) != 2 {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
switch t.oldRSUID {
case rss[0].UID:
t.newRSUID = rss[1].UID
case rss[1].UID:
t.newRSUID = rss[0].UID
default:
framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID))
}
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
t.oldDeploymentUID = deployment.UID
}
// Test checks whether the replicasets for a deployment are the same after an upgrade.
func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
<-done
c := f.ClientSet
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
Expect(deployment.UID).To(Equal(t.oldDeploymentUID))
By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 2 {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
switch t.oldRSUID {
case rss[0].UID:
Expect(rss[1].UID).To(Equal(t.newRSUID))
case rss[1].UID:
Expect(rss[0].UID).To(Equal(t.newRSUID))
default:
framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
}
By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2"))
By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *extensions.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
}
// Teardown cleans up any remaining resources.
func (t *DeploymentUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}

64
vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/job.go generated vendored Normal file

@@ -0,0 +1,64 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// JobUpgradeTest is a test harness for batch Jobs.
type JobUpgradeTest struct {
job *batch.Job
namespace string
}
func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }
// Setup starts a Job with a parallelism of 2 and 2 completions, then waits until its active pods match the parallelism.
func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.namespace = f.Namespace.Name
By("Creating a job")
t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == parallelism")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
Expect(err).NotTo(HaveOccurred())
}
// Test verifies that the Job's pods are running after an upgrade.
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
By("Ensuring active pods == parallelism")
running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(BeTrue())
}
// Teardown cleans up any remaining resources.
func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}


@@ -0,0 +1,101 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"time"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
interval = 10 * time.Second
timeout = 5 * time.Minute
rsName = "rs"
scaleNum = 2
)
// TODO: Test that the replicaset stays available during master (and maybe
// node and cluster upgrades).
// ReplicaSetUpgradeTest tests that a replicaset survives upgrade.
type ReplicaSetUpgradeTest struct {
UID types.UID
}
func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }
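// Setup creates a ReplicaSet and waits for all of its replicas to become ready.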
func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet
ns := f.Namespace.Name
nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
rs, err := c.Extensions().ReplicaSets(ns).Create(replicaSet)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
r.UID = rs.UID
}
// Test checks whether the replicasets are the same after an upgrade.
func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
c := f.ClientSet
ns := f.Namespace.Name
rsClient := c.Extensions().ReplicaSets(ns)
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
<-done
// Verify the RS is the same (survives) after the upgrade
By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
if upgradedRS.UID != r.UID {
framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
}
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *extensions.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
}
// Teardown cleans up any remaining resources.
func (r *ReplicaSetUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}


@@ -0,0 +1,114 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
)
// StatefulSetUpgradeTest implements an upgrade test harness for StatefulSet upgrade testing.
type StatefulSetUpgradeTest struct {
tester *framework.StatefulSetTester
service *v1.Service
set *apps.StatefulSet
}
func (StatefulSetUpgradeTest) Name() string { return "[sig-apps] statefulset-upgrade" }
func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0")
for _, vCtx := range upgCtx.Versions {
if vCtx.Version.LessThan(minVersion) {
return true
}
}
return false
}
// Setup creates a StatefulSet and a headless Service. It verifies the basic StatefulSet properties.
func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
ssName := "ss"
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
headlessSvcName := "test"
statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ns := f.Namespace.Name
t.set = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
t.service = framework.CreateStatefulSetService(ssName, labels)
*(t.set.Spec.Replicas) = 3
t.tester = framework.NewStatefulSetTester(f.ClientSet)
t.tester.PauseNewPods(t.set)
By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.Core().Services(ns).Create(t.service)
Expect(err).NotTo(HaveOccurred())
By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1beta1().StatefulSets(ns).Create(t.set)
Expect(err).NotTo(HaveOccurred())
By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)
t.verify()
t.restart()
t.verify()
}
// Test waits for the upgrade to complete and verifies the StatefulSet's basic functionality.
func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
t.verify()
}
// Teardown deletes all StatefulSets.
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
}
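// verify checks the mounted data directory, stable pod hostnames, and the service name, then writes each pod's hostname into its data directory.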
func (t *StatefulSetUpgradeTest) verify() {
By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(t.tester.CheckMount(t.set, "/data"))
By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(t.tester.CheckHostname(t.set))
By("Verifying statefulset set proper service name")
framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName))
cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd))
}
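// restart restarts the StatefulSet through the tester and waits for all replicas to be running and ready again.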
func (t *StatefulSetUpgradeTest) restart() {
By("Restarting statefulset " + t.set.Name)
t.tester.Restart(t.set)
t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set)
}

216
vendor/k8s.io/kubernetes/test/e2e/upgrades/cassandra.go generated vendored Normal file

@@ -0,0 +1,216 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
)
const cassandraManifestPath = "test/e2e/testing-manifests/statefulset/cassandra"
// CassandraUpgradeTest deploys a Cassandra StatefulSet and verifies that it behaves
// well across upgrades.
type CassandraUpgradeTest struct {
ip string
successfulWrites int
ssTester *framework.StatefulSetTester
}
// Name returns the tracking name of the test.
func (CassandraUpgradeTest) Name() string { return "cassandra-upgrade" }
// Skip returns true when this test can be skipped.
func (CassandraUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.6.0")
for _, vCtx := range upgCtx.Versions {
if vCtx.Version.LessThan(minVersion) {
return true
}
}
return false
}
func cassandraKubectlCreate(ns, file string) {
path := filepath.Join(framework.TestContext.RepoRoot, cassandraManifestPath, file)
framework.RunKubectlOrDie("create", "-f", path, fmt.Sprintf("--namespace=%s", ns))
}
// Setup creates a Cassandra StatefulSet and a PDB. It also brings up a tester
// ReplicaSet and associated service and PDB to guarantee availability during
// the upgrade.
// It waits for the system to stabilize before adding two users to verify
// connectivity.
func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB")
cassandraKubectlCreate(ns, "pdb.yaml")
By("Creating a Cassandra StatefulSet")
t.ssTester.CreateStatefulSet(cassandraManifestPath, ns)
By("Creating a cassandra-test-server deployment")
cassandraKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil
}
if _, err := t.listUsers(); err != nil {
framework.Logf("Service endpoint is up but isn't responding")
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Service endpoint is up")
By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred())
t.successfulWrites = 2
By("Verifying that the users exist")
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users)).To(Equal(2))
}
// listUsers gets a list of users from the db via the tester service.
func (t *CassandraUpgradeTest) listUsers() ([]string, error) {
r, err := http.Get(fmt.Sprintf("http://%s:8080/list", t.ip))
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
return nil, fmt.Errorf(string(b))
}
var names []string
if err := json.NewDecoder(r.Body).Decode(&names); err != nil {
return nil, err
}
return names, nil
}
// addUser adds a user to the db via the tester services.
func (t *CassandraUpgradeTest) addUser(name string) error {
val := map[string][]string{"name": {name}}
r, err := http.PostForm(fmt.Sprintf("http://%s:8080/add", t.ip), val)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
return fmt.Errorf(string(b))
}
return nil
}
// getServiceIP is a helper method to extract the Ingress IP from the service.
func (t *CassandraUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
}
return ingress[0].IP
}
// Test is called during the upgrade.
// It launches two goroutines, one continuously writes to the db and one reads
// from the db. Each attempt is tallied and at the end we verify if the success
// ratio is over a certain threshold (0.75). We also verify that we get
// at least the same number of rows back as we successfully wrote.
func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.")
var (
success, failures, writeAttempts, lastUserCount int
mu sync.Mutex
errors = map[string]int{}
)
// Write loop.
go wait.Until(func() {
writeAttempts++
if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
framework.Logf("Unable to add user: %v", err)
mu.Lock()
errors[err.Error()]++
mu.Unlock()
return
}
t.successfulWrites++
}, 10*time.Millisecond, done)
// Read loop.
wait.Until(func() {
users, err := t.listUsers()
if err != nil {
framework.Logf("Could not retrieve users: %v", err)
failures++
mu.Lock()
errors[err.Error()]++
mu.Unlock()
return
}
success++
lastUserCount = len(users)
}, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue())
ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue())
}
// Teardown does one final check of the data's availability.
func (t *CassandraUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue())
}


@@ -0,0 +1,150 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/uuid"
)
// ConfigMapUpgradeTest tests that a ConfigMap is available before and after
// a cluster upgrade.
type ConfigMapUpgradeTest struct {
configMap *v1.ConfigMap
}
func (ConfigMapUpgradeTest) Name() string {
return "configmap-upgrade [sig-storage] [sig-api-machinery]"
}
// Setup creates a ConfigMap and then verifies that a pod can consume it.
func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
configMapName := "upgrade-configmap"
ns := f.Namespace
t.configMap = &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.Name,
Name: configMapName,
},
Data: map[string]string{
"data": "some configmap data",
},
}
By("Creating a ConfigMap")
var err error
if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
}
By("Making sure the ConfigMap is consumable")
t.testPod(f)
}
// Test waits for the upgrade to complete, and then verifies that a
// pod can still consume the ConfigMap.
func (t *ConfigMapUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
By("Consuming the ConfigMap after upgrade")
t.testPod(f)
}
// Teardown cleans up any remaining resources.
func (t *ConfigMapUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
// testPod creates a pod that consumes a ConfigMap and prints it out. The
// output is then verified.
func (t *ConfigMapUpgradeTest) testPod(f *framework.Framework) {
volumeName := "configmap-volume"
volumeMountPath := "/etc/configmap-volume"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-configmap-" + string(uuid.NewUUID()),
Namespace: t.configMap.ObjectMeta.Namespace,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: t.configMap.ObjectMeta.Name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
fmt.Sprintf("--file_content=%s/data", volumeMountPath),
fmt.Sprintf("--file_mode=%s/data", volumeMountPath),
},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
{
Name: "configmap-env-test",
Image: "busybox",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "CONFIGMAP_DATA",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: t.configMap.ObjectMeta.Name,
},
Key: "data",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
expectedOutput := []string{
"content of file \"/etc/configmap-volume/data\": some configmap data",
"mode of file \"/etc/configmap-volume/data\": -rw-r--r--",
}
f.TestContainerOutput("volume consume configmap", pod, 0, expectedOutput)
expectedOutput = []string{"CONFIGMAP_DATA=some configmap data"}
f.TestContainerOutput("env consume configmap", pod, 1, expectedOutput)
}

199
vendor/k8s.io/kubernetes/test/e2e/upgrades/etcd.go generated vendored Normal file

@@ -0,0 +1,199 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"sync"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
)
const manifestPath = "test/e2e/testing-manifests/statefulset/etcd"
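// EtcdUpgradeTest tests that an etcd StatefulSet behaves well across upgrades.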
type EtcdUpgradeTest struct {
ip string
successfulWrites int
ssTester *framework.StatefulSetTester
}
func (EtcdUpgradeTest) Name() string { return "etcd-upgrade" }
func (EtcdUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.6.0")
for _, vCtx := range upgCtx.Versions {
if vCtx.Version.LessThan(minVersion) {
return true
}
}
return false
}
func kubectlCreate(ns, file string) {
path := filepath.Join(framework.TestContext.RepoRoot, manifestPath, file)
framework.RunKubectlOrDie("create", "-f", path, fmt.Sprintf("--namespace=%s", ns))
}
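// Setup creates an etcd StatefulSet and a PDB, brings up a test-server deployment, waits for the service endpoint to respond, and then adds two users to verify connectivity.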
func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a PDB")
kubectlCreate(ns, "pdb.yaml")
By("Creating an etcd StatefulSet")
t.ssTester.CreateStatefulSet(manifestPath, ns)
By("Creating an etcd--test-server deployment")
kubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the services")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil
}
if _, err := t.listUsers(); err != nil {
framework.Logf("Service endpoint is up but isn't responding")
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Service endpoint is up")
By("Adding 2 dummy users")
Expect(t.addUser("Alice")).NotTo(HaveOccurred())
Expect(t.addUser("Bob")).NotTo(HaveOccurred())
t.successfulWrites = 2
By("Verifying that the users exist")
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users)).To(Equal(2))
}
func (t *EtcdUpgradeTest) listUsers() ([]string, error) {
r, err := http.Get(fmt.Sprintf("http://%s:8080/list", t.ip))
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
return nil, fmt.Errorf(string(b))
}
var names []string
if err := json.NewDecoder(r.Body).Decode(&names); err != nil {
return nil, err
}
return names, nil
}
func (t *EtcdUpgradeTest) addUser(name string) error {
val := map[string][]string{"name": {name}}
r, err := http.PostForm(fmt.Sprintf("http://%s:8080/add", t.ip), val)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
return fmt.Errorf(string(b))
}
return nil
}
func (t *EtcdUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
}
return ingress[0].IP
}
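// Test is called during the upgrade. It continuously writes to and reads from the database, tallies each attempt, and at the end verifies that the success ratio exceeds a threshold and that at least as many rows can be read back as were successfully written.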
func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
By("Continuously polling the database during upgrade.")
var (
success, failures, writeAttempts, lastUserCount int
mu sync.Mutex
errors = map[string]int{}
)
// Write loop.
go wait.Until(func() {
writeAttempts++
if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
framework.Logf("Unable to add user: %v", err)
mu.Lock()
errors[err.Error()]++
mu.Unlock()
return
}
t.successfulWrites++
}, 10*time.Millisecond, done)
// Read loop.
wait.Until(func() {
users, err := t.listUsers()
if err != nil {
framework.Logf("Could not retrieve users: %v", err)
failures++
mu.Lock()
errors[err.Error()]++
mu.Unlock()
return
}
success++
lastUserCount = len(users)
}, 10*time.Millisecond, done)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
Expect(lastUserCount >= t.successfulWrites).To(BeTrue())
ratio := float64(success) / float64(success+failures)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs.
Expect(ratio > 0.75).To(BeTrue())
}
// Teardown does one final check of the data's availability.
func (t *EtcdUpgradeTest) Teardown(f *framework.Framework) {
users, err := t.listUsers()
Expect(err).NotTo(HaveOccurred())
Expect(len(users) >= t.successfulWrites).To(BeTrue())
}


@@ -0,0 +1,98 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// HPAUpgradeTest tests that the HPA rescales its target resource correctly before and after a cluster upgrade.
type HPAUpgradeTest struct {
rc *common.ResourceConsumer
hpa *autoscalingv1.HorizontalPodAutoscaler
}
func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
// Setup creates a resource consumer and an HPA object that autoscales the consumer.
func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
t.rc = common.NewDynamicResourceConsumer(
"res-cons-upgrade",
f.Namespace.Name,
common.KindRC,
1, /* replicas */
250, /* initCPUTotal */
0,
0,
500, /* cpuLimit */
200, /* memLimit */
f.ClientSet,
f.InternalClientset)
t.hpa = common.CreateCPUHorizontalPodAutoscaler(
t.rc,
20, /* targetCPUUtilizationPercent */
1, /* minPods */
5) /* maxPods */
t.rc.Pause()
t.test()
}
// Test waits for the upgrade to complete and verifies that the HPA still works correctly.
func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking HPA"))
<-done
t.test()
}
// Teardown cleans up any remaining resources.
func (t *HPAUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
common.DeleteHorizontalPodAutoscaler(t.rc, t.hpa.Name)
t.rc.CleanUp()
}
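// test drives CPU consumption through three levels and expects the HPA to scale the consumer to 1, 3, and then 5 replicas before pausing it again.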
func (t *HPAUpgradeTest) test() {
const timeToWait = 15 * time.Minute
t.rc.Resume()
By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
t.rc.ConsumeCPU(10) /* millicores */
By(fmt.Sprintf("HPA waits for 1 replica"))
t.rc.WaitForReplicas(1, timeToWait)
By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
t.rc.ConsumeCPU(250) /* millicores */
By(fmt.Sprintf("HPA waits for 3 replicas"))
t.rc.WaitForReplicas(3, timeToWait)
By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
t.rc.ConsumeCPU(700) /* millicores */
By(fmt.Sprintf("HPA waits for 5 replicas"))
t.rc.WaitForReplicas(5, timeToWait)
// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.
t.rc.Pause()
}

125
vendor/k8s.io/kubernetes/test/e2e/upgrades/ingress.go generated vendored Normal file

@@ -0,0 +1,125 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"net/http"
"path/filepath"
. "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
)
// IngressUpgradeTest adapts the Ingress e2e for upgrade testing
type IngressUpgradeTest struct {
gceController *framework.GCEIngressController
jig *framework.IngressTestJig
httpClient *http.Client
ip string
ipName string
}
func (IngressUpgradeTest) Name() string { return "ingress-upgrade" }
// Setup creates a GLBC, allocates a static IP and an Ingress resource,
// then waits for a successful connectivity check to the IP.
func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
framework.SkipUnlessProviderIs("gce", "gke")
// jig handles all Kubernetes testing logic
jig := framework.NewIngressTestJig(f.ClientSet)
ns := f.Namespace
// gceController handles all cloud testing logic
gceController := &framework.GCEIngressController{
Ns: ns.Name,
Client: jig.Client,
Cloud: framework.TestContext.CloudConfig,
}
gceController.Init()
t.gceController = gceController
t.jig = jig
t.httpClient = framework.BuildInsecureClient(framework.IngressReqTimeout)
// Allocate a static-ip for the Ingress, this IP is cleaned up via CleanupGCEIngressController
t.ipName = fmt.Sprintf("%s-static-ip", ns.Name)
t.ip = t.gceController.CreateStaticIP(t.ipName)
// Create a working basic Ingress
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", t.ipName, t.ip))
jig.CreateIngress(filepath.Join(framework.IngressManifestPath, "static-ip"), ns.Name, map[string]string{
"kubernetes.io/ingress.global-static-ip-name": t.ipName,
"kubernetes.io/ingress.allow-http": "false",
}, map[string]string{})
By("waiting for Ingress to come up with ip: " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, t.httpClient, false))
}
// Test waits for the upgrade to complete, and then verifies
// with a connectivity check to the load balancer IP.
func (t *IngressUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
switch upgrade {
case MasterUpgrade:
// Restarting the ingress controller shouldn't disrupt a steady state
// Ingress. Restarting the ingress controller and deleting ingresses
// while it's down will leak cloud resources, because the ingress
// controller doesn't checkpoint to disk.
t.verify(f, done, true)
default:
// Currently ingress gets disrupted across node upgrade, because endpoints
// get killed and we don't have any guarantees that 2 nodes don't overlap
// their upgrades (even on cloud platforms like GCE, because VM level
// rolling upgrades are not Kubernetes aware).
t.verify(f, done, false)
}
}
// Teardown cleans up any remaining resources.
func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
if CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(t.gceController.Ns)
}
if t.jig.Ingress != nil {
By("Deleting ingress")
t.jig.TryDeleteIngress()
} else {
By("No ingress created, no cleanup necessary")
}
By("Cleaning up cloud resources")
framework.CleanupGCEIngressController(t.gceController)
}
func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
By("continuously hitting the Ingress IP")
wait.Until(func() {
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}, t.jig.PollInterval, done)
} else {
By("waiting for upgrade to finish without checking if Ingress remains up")
<-done
}
By("hitting the Ingress IP " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/", t.ip), "", framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
}


@@ -0,0 +1,220 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"time"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTestTimeout = time.Duration(5 * time.Minute)
clusterAddonLabelKey = "k8s-app"
clusterComponentKey = "component"
kubeProxyLabelName = "kube-proxy"
)
// KubeProxyUpgradeTest tests kube-proxy static pods -> DaemonSet upgrade path.
type KubeProxyUpgradeTest struct {
}
func (KubeProxyUpgradeTest) Name() string { return "[sig-network] kube-proxy-upgrade" }
// Setup verifies that kube-proxy static pods are running before the upgrade.
func (t *KubeProxyUpgradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(f.ClientSet)).NotTo(HaveOccurred())
}
// Test validates that kube-proxy is migrated from static pods to a DaemonSet.
func (t *KubeProxyUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
c := f.ClientSet
// Block until upgrade is done.
By("Waiting for upgrade to finish")
<-done
By("Waiting for kube-proxy static pods disappear")
Expect(waitForKubeProxyStaticPodsDisappear(c)).NotTo(HaveOccurred())
By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(c)).NotTo(HaveOccurred())
}
// Teardown does nothing.
func (t *KubeProxyUpgradeTest) Teardown(f *framework.Framework) {
}
// KubeProxyDowngradeTest tests kube-proxy DaemonSet -> static pods downgrade path.
type KubeProxyDowngradeTest struct {
}
func (KubeProxyDowngradeTest) Name() string { return "[sig-network] kube-proxy-downgrade" }
// Setup verifies that the kube-proxy DaemonSet is running before the downgrade.
func (t *KubeProxyDowngradeTest) Setup(f *framework.Framework) {
By("Waiting for kube-proxy DaemonSet running and ready")
Expect(waitForKubeProxyDaemonSetRunning(f.ClientSet)).NotTo(HaveOccurred())
}
// Test validates that kube-proxy is migrated from a DaemonSet back to static pods.
func (t *KubeProxyDowngradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
c := f.ClientSet
// Block until upgrade is done.
By("Waiting for upgrade to finish")
<-done
By("Waiting for kube-proxy DaemonSet disappear")
Expect(waitForKubeProxyDaemonSetDisappear(c)).NotTo(HaveOccurred())
By("Waiting for kube-proxy static pods running and ready")
Expect(waitForKubeProxyStaticPodsRunning(c)).NotTo(HaveOccurred())
}
// Teardown does nothing.
func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) {
}
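// waitForKubeProxyStaticPodsRunning polls until the number of running kube-proxy static pods matches the number of schedulable nodes.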
func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
condition := func() (bool, error) {
pods, err := getKubeProxyStaticPods(c)
if err != nil {
framework.Logf("Failed to get kube-proxy static pods: %v", err)
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
numberkubeProxyPods := 0
for _, pod := range pods.Items {
if pod.Status.Phase == v1.PodRunning {
numberkubeProxyPods = numberkubeProxyPods + 1
}
}
if numberkubeProxyPods != numberSchedulableNodes {
framework.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods running: %v", err)
}
return nil
}
func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
condition := func() (bool, error) {
pods, err := getKubeProxyStaticPods(c)
if err != nil {
framework.Logf("Failed to get kube-proxy static pods: %v", err)
return false, nil
}
if len(pods.Items) != 0 {
framework.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods disappear: %v", err)
}
return nil
}
func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
condition := func() (bool, error) {
daemonSets, err := getKubeProxyDaemonSet(c)
if err != nil {
framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
return false, nil
}
if len(daemonSets.Items) != 1 {
framework.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
if numberkubeProxyPods != numberSchedulableNodes {
framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet running: %v", err)
}
return nil
}
func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
condition := func() (bool, error) {
daemonSets, err := getKubeProxyDaemonSet(c)
if err != nil {
framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
return false, nil
}
if len(daemonSets.Items) != 0 {
framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
return false, nil
}
return true, nil
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet disappear: %v", err)
}
return nil
}
func getKubeProxyStaticPods(c clientset.Interface) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterComponentKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
return c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
}
func getKubeProxyDaemonSet(c clientset.Interface) (*extensions.DaemonSetList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{clusterAddonLabelKey: kubeProxyLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
return c.Extensions().DaemonSets(metav1.NamespaceSystem).List(listOpts)
}
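
The three wait helpers above share one poll-and-count shape: list objects behind a label selector, compare against the schedulable node count, and retry on a fixed interval. The sketch below is illustrative only and not part of the vendored file; waitForLabeledPodsRunning and its parameters are hypothetical, and it assumes the same context-free client-go List call style used throughout this package.
package kubeproxysketch
import (
	"fmt"
	"time"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)
// waitForLabeledPodsRunning polls until exactly `want` pods matching key=value
// are Running in kube-system, mirroring waitForKubeProxyStaticPodsRunning.
func waitForLabeledPodsRunning(c clientset.Interface, key, value string, want int, timeout time.Duration) error {
	selector := labels.SelectorFromSet(labels.Set{key: value}).String()
	condition := func() (bool, error) {
		pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			// Treat transient list errors as "not ready yet" and keep polling.
			return false, nil
		}
		running := 0
		for _, pod := range pods.Items {
			if pod.Status.Phase == v1.PodRunning {
				running++
			}
		}
		return running == want, nil
	}
	if err := wait.PollImmediate(5*time.Second, timeout, condition); err != nil {
		return fmt.Errorf("error waiting for %d running pods with %s=%s: %v", want, key, value, err)
	}
	return nil
}
With the constants above, the static-pod check is roughly waitForLabeledPodsRunning(c, clusterComponentKey, kubeProxyLabelName, nodeCount, defaultTestTimeout).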

209
vendor/k8s.io/kubernetes/test/e2e/upgrades/mysql.go generated vendored Normal file
View File

@ -0,0 +1,209 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"strconv"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
)
const mysqlManifestPath = "test/e2e/testing-manifests/statefulset/mysql-upgrade"
// MySqlUpgradeTest implements an upgrade test harness that polls a replicated SQL database.
type MySqlUpgradeTest struct {
ip string
successfulWrites int
nextWrite int
ssTester *framework.StatefulSetTester
}
func (MySqlUpgradeTest) Name() string { return "mysql-upgrade" }
func (MySqlUpgradeTest) Skip(upgCtx UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0")
for _, vCtx := range upgCtx.Versions {
if vCtx.Version.LessThan(minVersion) {
return true
}
}
return false
}
func mysqlKubectlCreate(ns, file string) {
path := filepath.Join(framework.TestContext.RepoRoot, mysqlManifestPath, file)
framework.RunKubectlOrDie("create", "-f", path, fmt.Sprintf("--namespace=%s", ns))
}
func (t *MySqlUpgradeTest) getServiceIP(f *framework.Framework, ns, svcName string) string {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
ingress := svc.Status.LoadBalancer.Ingress
if len(ingress) == 0 {
return ""
}
return ingress[0].IP
}
// Setup creates a StatefulSet, HeadlessService, a Service to write to the db, and a Service to read
// from the db. It then connects to the db with the write Service and populates the db with a table
// and a few entries. Finally, it connects to the db with the read Service, and confirms the data is
// available. The db connections are left open to be used later in the test.
func (t *MySqlUpgradeTest) Setup(f *framework.Framework) {
ns := f.Namespace.Name
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
t.ssTester = framework.NewStatefulSetTester(f.ClientSet)
By("Creating a configmap")
mysqlKubectlCreate(ns, "configmap.yaml")
By("Creating a mysql StatefulSet")
t.ssTester.CreateStatefulSet(mysqlManifestPath, ns)
By("Creating a mysql-test-server deployment")
mysqlKubectlCreate(ns, "tester.yaml")
By("Getting the ingress IPs from the test-service")
err := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
if t.ip = t.getServiceIP(f, ns, "test-server"); t.ip == "" {
return false, nil
}
if _, err := t.countNames(); err != nil {
framework.Logf("Service endpoint is up but isn't responding")
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
framework.Logf("Service endpoint is up")
By("Adding 2 names to the database")
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred())
Expect(t.addName(strconv.Itoa(t.nextWrite))).NotTo(HaveOccurred())
By("Verifying that the 2 names have been inserted")
count, err := t.countNames()
Expect(err).NotTo(HaveOccurred())
Expect(count).To(Equal(2))
}
// Test continually polls the db using the read and write connections, inserting data, and checking
// that all the data is readable.
func (t *MySqlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
var writeSuccess, readSuccess, writeFailure, readFailure int
By("Continuously polling the database during upgrade.")
go wait.Until(func() {
_, err := t.countNames()
if err != nil {
framework.Logf("Error while trying to read data: %v", err)
readFailure++
} else {
readSuccess++
}
}, framework.Poll, done)
wait.Until(func() {
err := t.addName(strconv.Itoa(t.nextWrite))
if err != nil {
framework.Logf("Error while trying to write data: %v", err)
writeFailure++
} else {
writeSuccess++
}
}, framework.Poll, done)
t.successfulWrites = writeSuccess
framework.Logf("Successful reads: %d", readSuccess)
framework.Logf("Successful writes: %d", writeSuccess)
framework.Logf("Failed reads: %d", readFailure)
framework.Logf("Failed writes: %d", writeFailure)
// TODO: Not sure what the ratio defining a successful test run should be. At time of writing the
// test, failures only seem to happen when a race condition occurs (read/write starts, doesn't
// finish before upgrade interferes).
readRatio := float64(readSuccess) / float64(readSuccess+readFailure)
writeRatio := float64(writeSuccess) / float64(writeSuccess+writeFailure)
if readRatio < 0.75 {
framework.Failf("Too many failures reading data. Success ratio: %f", readRatio)
}
if writeRatio < 0.75 {
framework.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
}
}
// Teardown performs one final check of the data's availability.
func (t *MySqlUpgradeTest) Teardown(f *framework.Framework) {
count, err := t.countNames()
Expect(err).NotTo(HaveOccurred())
Expect(count).To(BeNumerically(">=", t.successfulWrites))
}
// addName adds a new value to the db.
func (t *MySqlUpgradeTest) addName(name string) error {
val := map[string][]string{"name": {name}}
t.nextWrite++
r, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", t.ip), val)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return err
}
return fmt.Errorf("%s", b)
}
return nil
}
// countNames checks to make sure the values in testing.users are available, and returns
// the count of them.
func (t *MySqlUpgradeTest) countNames() (int, error) {
r, err := http.Get(fmt.Sprintf("http://%s:8080/countNames", t.ip))
if err != nil {
return 0, err
}
defer r.Body.Close()
if r.StatusCode != http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return 0, err
}
return 0, fmt.Errorf("%s", b)
}
var count int
if err := json.NewDecoder(r.Body).Decode(&count); err != nil {
return 0, err
}
return count, nil
}
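
The addName and countNames helpers above assume a small HTTP test server exposing /addName and /countNames on port 8080 (deployed by tester.yaml). A minimal standalone probe against those same endpoints, shown only to illustrate the wire protocol the test relies on, could look like the following; the target IP is whatever the test-server LoadBalancer exposes.
package main
import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"os"
)
func main() {
	if len(os.Args) < 2 {
		log.Fatal("usage: probe <test-server-ip>")
	}
	ip := os.Args[1] // the LoadBalancer ingress IP of the "test-server" Service
	// Write one row, mirroring MySqlUpgradeTest.addName.
	resp, err := http.PostForm(fmt.Sprintf("http://%s:8080/addName", ip), url.Values{"name": {"probe"}})
	if err != nil {
		log.Fatalf("write failed: %v", err)
	}
	resp.Body.Close()
	// Read the row count back, mirroring MySqlUpgradeTest.countNames.
	resp, err = http.Get(fmt.Sprintf("http://%s:8080/countNames", ip))
	if err != nil {
		log.Fatalf("read failed: %v", err)
	}
	defer resp.Body.Close()
	var count int
	if err := json.NewDecoder(resp.Body).Decode(&count); err != nil {
		log.Fatalf("decode failed: %v", err)
	}
	fmt.Printf("names in database: %d\n", count)
}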

147
vendor/k8s.io/kubernetes/test/e2e/upgrades/secrets.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
)
// SecretUpgradeTest tests that a secret is available before and after
// a cluster upgrade.
type SecretUpgradeTest struct {
secret *v1.Secret
}
func (SecretUpgradeTest) Name() string { return "secret-upgrade [sig-storage] [sig-api-machinery]" }
// Setup creates a secret and then verifies that a pod can consume it.
func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
secretName := "upgrade-secret"
ns := f.Namespace
t.secret = &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.Name,
Name: secretName,
},
Data: map[string][]byte{
"data": []byte("keep it secret"),
},
}
By("Creating a secret")
var err error
if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
}
By("Making sure the secret is consumable")
t.testPod(f)
}
// Test waits for the upgrade to complete, and then verifies that a
// pod can still consume the secret.
func (t *SecretUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
By("Consuming the secret after upgrade")
t.testPod(f)
}
// Teardown cleans up any remaining resources.
func (t *SecretUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
// testPod creates a pod that consumes a secret and prints it out. The
// output is then verified.
func (t *SecretUpgradeTest) testPod(f *framework.Framework) {
volumeName := "secret-volume"
volumeMountPath := "/etc/secret-volume"
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
Namespace: t.secret.ObjectMeta.Namespace,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: t.secret.ObjectMeta.Name,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: imageutils.GetE2EImage(imageutils.Mounttest),
Args: []string{
fmt.Sprintf("--file_content=%s/data", volumeMountPath),
fmt.Sprintf("--file_mode=%s/data", volumeMountPath),
},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
{
Name: "secret-env-test",
Image: "busybox",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "SECRET_DATA",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: t.secret.ObjectMeta.Name,
},
Key: "data",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
expectedOutput := []string{
"content of file \"/etc/secret-volume/data\": keep it secret",
"mode of file \"/etc/secret-volume/data\": -rw-r--r--",
}
f.TestContainerOutput("volume consume secrets", pod, 0, expectedOutput)
expectedOutput = []string{"SECRET_DATA=keep it secret"}
f.TestContainerOutput("env consume secrets", pod, 1, expectedOutput)
}
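
testPod above verifies the secret through a consuming pod. A complementary API-level check, sketched below and not part of the vendored file, would simply read the Secret back after the upgrade and compare the payload created in Setup; verifySecretData is a hypothetical helper using the same context-free client-go calls as this package.
package secretsketch
import (
	"fmt"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)
// verifySecretData re-reads the upgrade secret and checks that the "data" key
// still holds the value written during Setup.
func verifySecretData(c clientset.Interface, namespace, name string) error {
	secret, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("unable to fetch secret %s/%s: %v", namespace, name, err)
	}
	if got := string(secret.Data["data"]); got != "keep it secret" {
		return fmt.Errorf("secret %s/%s has unexpected payload %q", namespace, name, got)
	}
	return nil
}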

112
vendor/k8s.io/kubernetes/test/e2e/upgrades/services.go generated vendored Normal file
View File

@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// ServiceUpgradeTest tests that a service is available before and
// after a cluster upgrade. During a master-only upgrade, it will test
// that a service remains available during the upgrade.
type ServiceUpgradeTest struct {
jig *framework.ServiceTestJig
tcpService *v1.Service
tcpIngressIP string
svcPort int
}
func (ServiceUpgradeTest) Name() string { return "service-upgrade" }
func shouldTestPDBs() bool { return framework.ProviderIs("gce", "gke") }
// Setup creates a service with a load balancer and makes sure it's reachable.
func (t *ServiceUpgradeTest) Setup(f *framework.Framework) {
serviceName := "service-test"
jig := framework.NewServiceTestJig(f.ClientSet, serviceName)
ns := f.Namespace
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + ns.Name)
tcpService := jig.CreateTCPServiceOrFail(ns.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
tcpService = jig.WaitForLoadBalancerOrFail(ns.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
// Get info to hit it with
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName)
rc := jig.RunOrFail(ns.Name, jig.AddRCAntiAffinity)
if shouldTestPDBs() {
By("creating a PodDisruptionBudget to cover the ReplicationController")
jig.CreatePDBOrFail(ns.Name, rc)
}
// Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig = jig
t.tcpService = tcpService
t.tcpIngressIP = tcpIngressIP
t.svcPort = svcPort
}
// Test runs a connectivity check to the service.
func (t *ServiceUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
switch upgrade {
case MasterUpgrade:
t.test(f, done, true)
case NodeUpgrade:
// Node upgrades should test during disruption only on GCE/GKE for now.
t.test(f, done, shouldTestPDBs())
default:
t.test(f, done, false)
}
}
// Teardown cleans up any remaining resources.
func (t *ServiceUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
func (t *ServiceUpgradeTest) test(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
// Continuous validation
By("continuously hitting the pod through the service's LoadBalancer")
wait.Until(func() {
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
}, framework.Poll, done)
} else {
// Block until upgrade is done
By("waiting for upgrade to finish without checking if service remains up")
<-done
}
// Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer")
t.jig.TestReachableHTTP(t.tcpIngressIP, t.svcPort, framework.LoadBalancerLagTimeoutDefault)
t.jig.SanityCheckService(t.tcpService, v1.ServiceTypeLoadBalancer)
}
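
The disruption check above delegates to the jig's TestReachableHTTP. A rough, standalone approximation of that reachability probe, shown only to make the polling pattern concrete, follows; waitForHTTPReachable is hypothetical, and the request path "/" and plain 2xx check are assumptions rather than what the jig actually sends.
package servicesketch
import (
	"fmt"
	"net/http"
	"time"
	"k8s.io/apimachinery/pkg/util/wait"
)
// waitForHTTPReachable polls the LoadBalancer address until a plain HTTP GET
// returns a 2xx response or the timeout expires.
func waitForHTTPReachable(ip string, port int, timeout time.Duration) error {
	url := fmt.Sprintf("http://%s:%d/", ip, port)
	client := &http.Client{Timeout: 10 * time.Second}
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		resp, err := client.Get(url)
		if err != nil {
			return false, nil // not reachable yet; keep polling
		}
		defer resp.Body.Close()
		return resp.StatusCode >= 200 && resp.StatusCode < 300, nil
	})
}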

33
vendor/k8s.io/kubernetes/test/e2e/upgrades/storage/BUILD generated vendored Normal file
View File

@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["persistent_volumes.go"],
importpath = "k8s.io/kubernetes/test/e2e/upgrades/storage",
deps = [
"//test/e2e/framework:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

104
vendor/k8s.io/kubernetes/test/e2e/upgrades/storage/persistent_volumes.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/upgrades"
)
// PersistentVolumeUpgradeTest tests that a PV is available before and after a cluster upgrade.
type PersistentVolumeUpgradeTest struct {
pvSource *v1.PersistentVolumeSource
pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim
}
func (PersistentVolumeUpgradeTest) Name() string { return "persistent-volume-upgrade [sig-storage]" }
const (
pvTestFile string = "/mnt/volume1/pv_upgrade_test"
pvTestData string = "keep it pv"
pvWriteCmd string = "echo \"" + pvTestData + "\" > " + pvTestFile
pvReadCmd string = "cat " + pvTestFile
)
func (t *PersistentVolumeUpgradeTest) deleteGCEVolume(pvSource *v1.PersistentVolumeSource) error {
return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
}
// Setup creates a pv and then verifies that a pod can consume it. The pod writes data to the volume.
func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
var err error
// TODO: generalize this to other providers
framework.SkipUnlessProviderIs("gce", "gke")
ns := f.Namespace.Name
By("Initializing PV source")
t.pvSource, _ = framework.CreateGCEVolume()
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "pv-upgrade",
PVSource: *t.pvSource,
Prebind: nil,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
}
By("Creating the PV and PVC")
t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitOnPVandPVC(f.ClientSet, ns, t.pv, t.pvc))
By("Consuming the PV before upgrade")
t.testPod(f, pvWriteCmd+";"+pvReadCmd)
}
// Test waits for the upgrade to complete, and then verifies that a pod can still consume the pv
// and that the volume data persists.
func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
By("Consuming the PV after upgrade")
t.testPod(f, pvReadCmd)
}
// Teardown cleans up any remaining resources.
func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, t.pv, t.pvc)
if err := t.deleteGCEVolume(t.pvSource); err != nil {
errs = append(errs, err)
}
if len(errs) > 0 {
framework.Failf("Failed to delete 1 or more PVs/PVCs and/or the GCE volume. Errors: %v", utilerrors.NewAggregate(errs))
}
}
// testPod creates a pod that consumes a pv and prints it out. The output is then verified.
func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
pod := framework.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
expectedOutput := []string{pvTestData}
f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput)
}
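
testPod above exercises the volume through a pod. As a direct API-level complement (illustrative only; verifyPVCBound is a hypothetical helper), one could also confirm that the claim created in Setup is still Bound after the upgrade, using the same context-free client-go call style as this package.
package storagesketch
import (
	"fmt"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)
// verifyPVCBound fetches the claim and checks that it is still bound to a PV.
func verifyPVCBound(c clientset.Interface, namespace, claimName string) error {
	pvc, err := c.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("unable to fetch PVC %s/%s: %v", namespace, claimName, err)
	}
	if pvc.Status.Phase != v1.ClaimBound {
		return fmt.Errorf("PVC %s/%s is in phase %s, expected %s", namespace, claimName, pvc.Status.Phase, v1.ClaimBound)
	}
	return nil
}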

141
vendor/k8s.io/kubernetes/test/e2e/upgrades/sysctl.go generated vendored Normal file
View File

@ -0,0 +1,141 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"
)
// SysctlUpgradeTest tests that a pod with sysctls runs before and after an upgrade. During
// a master upgrade, the same pod is expected to keep running. A pod with unsafe sysctls is
// expected to keep failing before and after the upgrade.
type SysctlUpgradeTest struct {
validPod *v1.Pod
invalidPod *v1.Pod
}
// Setup creates two pods: one with safe sysctls, one with unsafe sysctls. It checks that the former
// launches and the latter is rejected.
func (t *SysctlUpgradeTest) Setup(f *framework.Framework) {
t.validPod = t.verifySafeSysctlWork(f)
t.invalidPod = t.verifyUnsafeSysctlsAreRejected(f)
}
// Test waits for the upgrade to complete, and then verifies that a pod with
// safe sysctls still launches and a pod with unsafe sysctls is still rejected.
func (t *SysctlUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
switch upgrade {
case MasterUpgrade:
By("Checking the safe sysctl pod keeps running on master upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.validPod.Namespace).Get(t.validPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(pod.Status.Phase).To(Equal(v1.PodRunning))
}
By("Checking the old unsafe sysctl pod was not suddenly started during an upgrade")
pod, err := f.ClientSet.CoreV1().Pods(t.invalidPod.Namespace).Get(t.invalidPod.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
if err == nil {
Expect(pod.Status.Phase).NotTo(Equal(v1.PodRunning))
}
t.verifySafeSysctlWork(f)
t.verifyUnsafeSysctlsAreRejected(f)
}
// Teardown cleans up any remaining resources.
func (t *SysctlUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
func (t *SysctlUpgradeTest) verifySafeSysctlWork(f *framework.Framework) *v1.Pod {
By("Creating a pod with safe sysctls")
safeSysctl := "net.ipv4.ip_local_port_range"
safeSysctlValue := "1024 1042"
validPod := sysctlTestPod("valid-sysctls", map[string]string{safeSysctl: safeSysctlValue})
validPod = f.PodClient().Create(validPod)
By("Making sure the valid pod launches")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(validPod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
f.TestContainerOutput("pod with safe sysctl launched", t.validPod, 0, []string{fmt.Sprintf("%s = %s", safeSysctl, safeSysctlValue)})
return validPod
}
func (t *SysctlUpgradeTest) verifyUnsafeSysctlsAreRejected(f *framework.Framework) *v1.Pod {
By("Creating a pod with unsafe sysctls")
invalidPod := sysctlTestPod("valid-sysctls-"+string(uuid.NewUUID()), map[string]string{
"fs.mount-max": "1000000",
})
invalidPod = f.PodClient().Create(invalidPod)
By("Making sure the invalid pod failed")
ev, err := f.PodClient().WaitForErrorEventOrSuccess(invalidPod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev.Reason).To(Equal(sysctl.ForbiddenReason))
return invalidPod
}
func sysctlTestPod(name string, sysctls map[string]string) *v1.Pod {
sysctlList := []v1.Sysctl{}
keys := []string{}
for k, v := range sysctls {
sysctlList = append(sysctlList, v1.Sysctl{Name: k, Value: v})
keys = append(keys, k)
}
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
v1.SysctlsPodAnnotationKey: v1helper.PodAnnotationsFromSysctls(sysctlList),
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container",
Image: "busybox",
Command: append([]string{"/bin/sysctl"}, keys...),
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
}
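
sysctlTestPod above encodes the requested sysctls into the alpha pod annotation via v1helper.PodAnnotationsFromSysctls. The sketch below (illustrative only; sysctlAnnotationValue is hypothetical) composes the same kind of value by hand, assuming the "name=value,name=value" encoding that helper appears to produce in this tree, purely to make the annotation format visible.
package sysctlsketch
import (
	"fmt"
	"sort"
	"strings"
)
// sysctlAnnotationValue joins sysctls into a comma-separated "name=value" list,
// assumed to match the encoding of the security.alpha.kubernetes.io/sysctls
// annotation; the order is sorted here for determinism, whereas the real helper
// presumably preserves the order of its input slice.
func sysctlAnnotationValue(sysctls map[string]string) string {
	parts := make([]string, 0, len(sysctls))
	for name, value := range sysctls {
		parts = append(parts, fmt.Sprintf("%s=%s", name, value))
	}
	sort.Strings(parts)
	return strings.Join(parts, ",")
}
For the safe sysctl used above, this yields "net.ipv4.ip_local_port_range=1024 1042".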

84
vendor/k8s.io/kubernetes/test/e2e/upgrades/upgrade.go generated vendored Normal file
View File

@ -0,0 +1,84 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package upgrades provides a framework for testing Kubernetes
// features before, during, and after different types of upgrades.
package upgrades
import (
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
)
// UpgradeType represents different types of upgrades.
type UpgradeType int
const (
// MasterUpgrade indicates that only the master is being upgraded.
MasterUpgrade UpgradeType = iota
// NodeUpgrade indicates that only the nodes are being upgraded.
NodeUpgrade
// ClusterUpgrade indicates that both master and nodes are
// being upgraded.
ClusterUpgrade
// EtcdUpgrade indicates that only etcd is being upgraded (or migrated
// between storage versions).
EtcdUpgrade
)
// Test is an interface for upgrade tests.
type Test interface {
// Name should return a test name sans spaces.
Name() string
// Setup should create and verify whatever objects need to
// exist before the upgrade disruption starts.
Setup(f *framework.Framework)
// Test will run during the upgrade. When the upgrade is
// complete, done will be closed and final validation can
// begin.
Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
// Teardown should clean up any objects that are created that
// aren't already cleaned up by the framework. This will
// always be called, even if Setup failed.
Teardown(f *framework.Framework)
}
// Skippable is an interface that an upgrade test can implement to be
// able to indicate that it should be skipped.
type Skippable interface {
// Skip should return true if test should be skipped. upgCtx
// provides information about the upgrade that is going to
// occur.
Skip(upgCtx UpgradeContext) bool
}
// UpgradeContext contains information about all the stages of the
// upgrade that is going to occur.
type UpgradeContext struct {
Versions []VersionContext
}
// VersionContext represents a stage of the upgrade.
type VersionContext struct {
Version version.Version
NodeImage string
}
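
To make the interfaces above concrete, here is a minimal, hypothetical skeleton of a test that implements both Test and Skippable; NoopUpgradeTest does not exist in this package, the 1.6.0 floor is an arbitrary example, and the bodies only mark where the setup, in-flight, and teardown work would go.
package upgrades
import (
	"k8s.io/kubernetes/pkg/util/version"
	"k8s.io/kubernetes/test/e2e/framework"
)
// NoopUpgradeTest is a do-nothing example of the Test and Skippable interfaces.
type NoopUpgradeTest struct{}
func (NoopUpgradeTest) Name() string { return "noop-upgrade" }
// Skip opts out whenever any stage of the upgrade is older than an example minimum.
func (NoopUpgradeTest) Skip(upgCtx UpgradeContext) bool {
	minVersion := version.MustParseSemantic("1.6.0")
	for _, vCtx := range upgCtx.Versions {
		if vCtx.Version.LessThan(minVersion) {
			return true
		}
	}
	return false
}
// Setup would create and verify whatever objects the test needs before the
// upgrade disruption starts.
func (t *NoopUpgradeTest) Setup(f *framework.Framework) {}
// Test blocks until the upgrade finishes, then runs its final validation.
func (t *NoopUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	<-done
}
// Teardown relies on namespace deletion, as the other tests in this package do.
func (t *NoopUpgradeTest) Teardown(f *framework.Framework) {}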