vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/BUILD generated vendored Normal file

@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"daemonsets.go",
"deployments.go",
"job.go",
"replicasets.go",
"statefulset.go",
],
importpath = "k8s.io/kubernetes/test/e2e/upgrades/apps",
deps = [
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
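
Outside this diff, these harnesses get wired into an upgrade suite. Below is a minimal sketch of that registration, assuming the upgrades.Test interface (Name/Setup/Test/Teardown) from //test/e2e/upgrades; the surrounding program is illustrative and not part of this commit:

package main

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/upgrades"
	apps "k8s.io/kubernetes/test/e2e/upgrades/apps"
)

// Each harness implements upgrades.Test: the suite calls Setup before the
// upgrade, runs Test with a done channel that closes when the upgrade
// finishes, and calls Teardown afterwards.
var upgradeTests = []upgrades.Test{
	&apps.DaemonSetUpgradeTest{},
	&apps.DeploymentUpgradeTest{},
	&apps.JobUpgradeTest{},
	&apps.ReplicaSetUpgradeTest{},
	&apps.StatefulSetUpgradeTest{},
}

func main() {
	for _, t := range upgradeTests {
		fmt.Println(t.Name())
	}
}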

vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/daemonsets.go generated vendored Normal file

@@ -0,0 +1,182 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
)
// DaemonSetUpgradeTest tests that a DaemonSet is running before and after
// a cluster upgrade.
type DaemonSetUpgradeTest struct {
daemonSet *extensions.DaemonSet
}
func (DaemonSetUpgradeTest) Name() string { return "[sig-apps] daemonset-upgrade" }
// Setup creates a DaemonSet and verifies that it's running
func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
daemonSetName := "ds1"
labelSet := map[string]string{"ds-name": daemonSetName}
image := framework.ServeHostnameImage
ns := f.Namespace
t.daemonSet = &extensions.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.Name,
Name: daemonSetName,
},
Spec: extensions.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labelSet,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: daemonSetName,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
}
By("Creating a DaemonSet")
var err error
if t.daemonSet, err = f.ClientSet.ExtensionsV1beta1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}
By("Waiting for DaemonSet pods to become ready")
err = wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
return checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
})
framework.ExpectNoError(err)
By("Validating the DaemonSet after creation")
t.validateRunningDaemonSet(f)
}
// Test waits until the upgrade has completed and then verifies that the DaemonSet
// is still running
func (t *DaemonSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
By("Waiting for upgradet to complete before re-validating DaemonSet")
<-done
By("validating the DaemonSet is still running after upgrade")
t.validateRunningDaemonSet(f)
}
// Teardown cleans up any remaining resources.
func (t *DaemonSetUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) {
By("confirming the DaemonSet pods are running on all expected nodes")
res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
framework.ExpectNoError(err)
if !res {
framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
}
// DaemonSet resource itself should be good
By("confirming the DaemonSet resource is in a good state")
res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
framework.ExpectNoError(err)
if !res {
framework.Failf("expected DaemonSet to be in a good state, it was not")
}
}
func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector map[string]string) (bool, error) {
nodeList, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return false, err
}
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
if len(node.Spec.Taints) == 0 {
nodeNames = append(nodeNames, node.Name)
} else {
framework.Logf("Node %v not expected to have DaemonSet pod, has taints %v", node.Name, node.Spec.Taints)
}
}
return checkDaemonPodOnNodes(f, namespace, selector, nodeNames)
}
func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet map[string]string, nodeNames []string) (bool, error) {
selector := labels.Set(labelSet).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := f.ClientSet.Core().Pods(namespace).List(options)
if err != nil {
return false, err
}
pods := podList.Items
nodesToPodCount := make(map[string]int)
for _, pod := range pods {
if controller.IsPodActive(&pod) {
framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
nodesToPodCount[pod.Spec.NodeName]++
}
}
framework.Logf("nodesToPodCount: %v", nodesToPodCount)
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
for _, nodeName := range nodeNames {
if nodesToPodCount[nodeName] != 1 {
return false, nil
}
}
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
// other nodes.
return len(nodesToPodCount) == len(nodeNames), nil
}
func checkDaemonStatus(f *framework.Framework, namespace string, dsName string) (bool, error) {
ds, err := f.ClientSet.ExtensionsV1beta1().DaemonSets(namespace).Get(dsName, metav1.GetOptions{})
if err != nil {
return false, err
}
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
return false, nil
}
return true, nil
}
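
The length comparison at the end of checkDaemonPodOnNodes is the subtle step: once every expected node is known to host exactly one pod, equal map and slice sizes prove that no pod runs anywhere else. A self-contained sketch of that invariant (names here are illustrative):

package main

import "fmt"

// exactlyOnePodPerNode mirrors the check above: each expected node hosts
// exactly one pod, and no other node hosts any.
func exactlyOnePodPerNode(nodeNames []string, nodesToPodCount map[string]int) bool {
	for _, name := range nodeNames {
		if nodesToPodCount[name] != 1 {
			return false
		}
	}
	// Every expected node is in the map with count 1, so equal sizes mean
	// no pods landed on any node outside nodeNames.
	return len(nodesToPodCount) == len(nodeNames)
}

func main() {
	counts := map[string]int{"node-a": 1, "node-b": 1}
	fmt.Println(exactlyOnePodPerNode([]string{"node-a", "node-b"}, counts)) // true
	counts["node-c"] = 1 // e.g. a pod scheduled onto a tainted node
	fmt.Println(exactlyOnePodPerNode([]string{"node-a", "node-b"}, counts)) // false
}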

vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/deployments.go generated vendored Normal file

@@ -0,0 +1,172 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
deploymentName = "dp"
)
// TODO: Test that the deployment stays available during master (and maybe
// node and cluster) upgrades.
// DeploymentUpgradeTest tests that a deployment is using the same replica
// sets before and after a cluster upgrade.
type DeploymentUpgradeTest struct {
oldDeploymentUID types.UID
oldRSUID types.UID
newRSUID types.UID
}
func (DeploymentUpgradeTest) Name() string { return "[sig-apps] deployment-upgrade" }
// Setup creates a deployment and makes sure it has a new and an old replicaset running.
func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet
nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, extensions.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicaset revision 1 of deployment %q", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 1 {
framework.ExpectNoError(fmt.Errorf("expected one replicaset, got %d", len(rss)))
}
t.oldRSUID = rss[0].UID
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 1", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "1"))
// Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting deployment %q to complete", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
By(fmt.Sprintf("Getting replicasets revision 1 and 2 of deployment %q", deploymentName))
rsList, err = rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss = rsList.Items
if len(rss) != 2 {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
By(fmt.Sprintf("Checking replicaset of deployment %q that is created before rollout survives the rollout", deploymentName))
switch t.oldRSUID {
case rss[0].UID:
t.newRSUID = rss[1].UID
case rss[1].UID:
t.newRSUID = rss[0].UID
default:
framework.ExpectNoError(fmt.Errorf("old replicaset with UID %q does not survive rollout", t.oldRSUID))
}
By(fmt.Sprintf("Waiting for revision of the deployment %q to become 2", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentRevision(c, deployment, "2"))
t.oldDeploymentUID = deployment.UID
}
// Test checks whether the replicasets for a deployment are the same after an upgrade.
func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicasets for deployment %q", deploymentName))
<-done
c := f.ClientSet
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
By(fmt.Sprintf("Checking UID to verify deployment %q survives upgrade", deploymentName))
Expect(deployment.UID).To(Equal(t.oldDeploymentUID))
By(fmt.Sprintf("Verifying deployment %q does not create new replicasets", deploymentName))
rsSelector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
rsList, err := rsClient.List(metav1.ListOptions{LabelSelector: rsSelector.String()})
framework.ExpectNoError(err)
rss := rsList.Items
if len(rss) != 2 {
framework.ExpectNoError(fmt.Errorf("expected 2 replicaset, got %d", len(rss)))
}
switch t.oldRSUID {
case rss[0].UID:
Expect(rss[1].UID).To(Equal(t.newRSUID))
case rss[1].UID:
Expect(rss[0].UID).To(Equal(t.newRSUID))
default:
framework.ExpectNoError(fmt.Errorf("new replicasets are created during upgrade of deployment %q", deploymentName))
}
By(fmt.Sprintf("Verifying revision of the deployment %q is still 2", deploymentName))
Expect(deployment.Annotations[deploymentutil.RevisionAnnotation]).To(Equal("2"))
By(fmt.Sprintf("Waiting for deployment %q to complete adoption", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
// Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *extensions.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for deployment %q to complete after scaling", deploymentName))
framework.ExpectNoError(framework.WaitForDeploymentComplete(c, deployment))
}
// Teardown cleans up any remaining resources.
func (t *DeploymentUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
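
The switch statements in Setup and Test above do the UID bookkeeping: remember the pre-rollout replicaset, then require the post-upgrade pair to be exactly the old one plus the rollout's new one. A stripped-down sketch of that logic, with plain strings standing in for types.UID:

package main

import (
	"errors"
	"fmt"
)

// pickNewRSUID returns the UID of the replicaset that is not the old one,
// or an error if the old replicaset is missing from the post-rollout pair.
func pickNewRSUID(oldUID, a, b string) (string, error) {
	switch oldUID {
	case a:
		return b, nil
	case b:
		return a, nil
	}
	return "", errors.New("old replicaset did not survive the rollout")
}

func main() {
	newUID, err := pickNewRSUID("uid-old", "uid-old", "uid-new")
	fmt.Println(newUID, err) // uid-new <nil>
}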

vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/job.go generated vendored Normal file

@@ -0,0 +1,64 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// JobUpgradeTest is a test harness for batch Jobs.
type JobUpgradeTest struct {
job *batch.Job
namespace string
}
func (JobUpgradeTest) Name() string { return "[sig-apps] job-upgrade" }
// Setup starts a Job with parallelism 2 and 2 completions, and waits for its pods to be running.
func (t *JobUpgradeTest) Setup(f *framework.Framework) {
t.namespace = f.Namespace.Name
By("Creating a job")
t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
t.job = job
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == parallelism")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
Expect(err).NotTo(HaveOccurred())
}
// Test verifies that the Job's pods are running after an upgrade
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
By("Ensuring active pods == parallelism")
running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(BeTrue())
}
// Teardown cleans up any remaining resources.
func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
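
framework.NewTestJob hides the actual manifest; the shape below is an assumption, showing roughly what a Job named "foo" with parallelism 2, completions 2, backoff limit 6, and non-terminating pods looks like when built from the vendored API types:

package main

import (
	"fmt"

	batch "k8s.io/api/batch/v1"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	job := &batch.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "foo"},
		Spec: batch.JobSpec{
			Parallelism:  int32Ptr(2), // the active pod count the test waits for
			Completions:  int32Ptr(2),
			BackoffLimit: int32Ptr(6),
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyOnFailure,
					Containers: []v1.Container{{
						Name:    "c",
						Image:   "busybox",
						Command: []string{"sleep", "1000000"}, // "notTerminate"
					}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", job.Spec)
}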

vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/replicasets.go generated vendored Normal file

@@ -0,0 +1,101 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"fmt"
"time"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
. "github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
const (
interval = 10 * time.Second
timeout = 5 * time.Minute
rsName = "rs"
scaleNum = 2
)
// TODO: Test that the replicaset stays available during master (and maybe
// node and cluster) upgrades.
// ReplicaSetUpgradeTest tests that a replicaset survives upgrade.
type ReplicaSetUpgradeTest struct {
UID types.UID
}
func (ReplicaSetUpgradeTest) Name() string { return "[sig-apps] replicaset-upgrade" }
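// Setup creates a replicaset and waits for all of its replicas to be ready.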
func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
c := f.ClientSet
ns := f.Namespace.Name
nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
rs, err := c.Extensions().ReplicaSets(ns).Create(replicaSet)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
r.UID = rs.UID
}
// Test checks whether the replicasets are the same after an upgrade.
func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
c := f.ClientSet
ns := f.Namespace.Name
rsClient := c.Extensions().ReplicaSets(ns)
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
<-done
// Verify the RS is the same (survives) after the upgrade
By(fmt.Sprintf("Checking UID to verify replicaset %s survives upgrade", rsName))
upgradedRS, err := rsClient.Get(rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
if upgradedRS.UID != r.UID {
framework.ExpectNoError(fmt.Errorf("expected same replicaset UID: %v got: %v", r.UID, upgradedRS.UID))
}
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after upgrade", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *extensions.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready after scaling", rsName))
framework.ExpectNoError(framework.WaitForReadyReplicaSet(c, ns, rsName))
}
// Teardown cleans up any remaining resources.
func (r *ReplicaSetUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
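
framework.UpdateReplicaSetWithRetries wraps the usual get/mutate/update loop that retries on write conflicts. A minimal sketch of that pattern against the vendored (pre-context-argument) client-go; the framework's real helper may differ in its details:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// scaleReplicaSet re-reads the object on every attempt so the update
// carries a fresh resourceVersion, and retries only on conflict errors.
func scaleReplicaSet(c kubernetes.Interface, ns, name string, replicas int32) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		*rs.Spec.Replicas = replicas
		_, err = c.ExtensionsV1beta1().ReplicaSets(ns).Update(rs)
		return err
	})
}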

vendor/k8s.io/kubernetes/test/e2e/upgrades/apps/statefulset.go generated vendored Normal file

@@ -0,0 +1,114 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/upgrades"
)
// StatefulSetUpgradeTest implements an upgrade test harness for StatefulSet upgrade testing.
type StatefulSetUpgradeTest struct {
tester *framework.StatefulSetTester
service *v1.Service
set *apps.StatefulSet
}
func (StatefulSetUpgradeTest) Name() string { return "[sig-apps] statefulset-upgrade" }
func (StatefulSetUpgradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
minVersion := version.MustParseSemantic("1.5.0")
for _, vCtx := range upgCtx.Versions {
if vCtx.Version.LessThan(minVersion) {
return true
}
}
return false
}
// Setup creates a StatefulSet and a headless Service. It verifies the basic StatefulSet properties
func (t *StatefulSetUpgradeTest) Setup(f *framework.Framework) {
ssName := "ss"
labels := map[string]string{
"foo": "bar",
"baz": "blah",
}
headlessSvcName := "test"
statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ns := f.Namespace.Name
t.set = framework.NewStatefulSet(ssName, ns, headlessSvcName, 2, statefulPodMounts, podMounts, labels)
t.service = framework.CreateStatefulSetService(ssName, labels)
*(t.set.Spec.Replicas) = 3
t.tester = framework.NewStatefulSetTester(f.ClientSet)
t.tester.PauseNewPods(t.set)
By("Creating service " + headlessSvcName + " in namespace " + ns)
_, err := f.ClientSet.Core().Services(ns).Create(t.service)
Expect(err).NotTo(HaveOccurred())
By("Creating statefulset " + ssName + " in namespace " + ns)
*(t.set.Spec.Replicas) = 3
_, err = f.ClientSet.AppsV1beta1().StatefulSets(ns).Create(t.set)
Expect(err).NotTo(HaveOccurred())
By("Saturating stateful set " + t.set.Name)
t.tester.Saturate(t.set)
t.verify()
t.restart()
t.verify()
}
// Test waits for the upgrade to complete and then verifies the StatefulSet's basic functionality
func (t *StatefulSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
<-done
t.verify()
}
// Teardown deletes all StatefulSets
func (t *StatefulSetUpgradeTest) Teardown(f *framework.Framework) {
framework.DeleteAllStatefulSets(f.ClientSet, t.set.Name)
}
func (t *StatefulSetUpgradeTest) verify() {
By("Verifying statefulset mounted data directory is usable")
framework.ExpectNoError(t.tester.CheckMount(t.set, "/data"))
By("Verifying statefulset provides a stable hostname for each pod")
framework.ExpectNoError(t.tester.CheckHostname(t.set))
By("Verifying statefulset set proper service name")
framework.ExpectNoError(t.tester.CheckServiceName(t.set, t.set.Spec.ServiceName))
cmd := "echo $(hostname) > /data/hostname; sync;"
By("Running " + cmd + " in all stateful pods")
framework.ExpectNoError(t.tester.ExecInStatefulPods(t.set, cmd))
}
func (t *StatefulSetUpgradeTest) restart() {
By("Restarting statefulset " + t.set.Name)
t.tester.Restart(t.set)
t.tester.WaitForRunningAndReady(*t.set.Spec.Replicas, t.set)
}