vendor files

This commit is contained in:
Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

64
vendor/k8s.io/kubernetes/test/e2e/lifecycle/BUILD generated vendored Normal file

@@ -0,0 +1,64 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"addon_update.go",
"cluster_upgrade.go",
"framework.go",
"ha_master.go",
"node_auto_repairs.go",
"reboot.go",
"resize_nodes.go",
"restart.go",
],
importpath = "k8s.io/kubernetes/test/e2e/lifecycle",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e/upgrades/apps:go_default_library",
"//test/e2e/upgrades/storage:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//test/utils/junit:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/lifecycle/bootstrap:all-srcs",
],
tags = ["automanaged"],
)

4
vendor/k8s.io/kubernetes/test/e2e/lifecycle/OWNERS generated vendored Normal file

@@ -0,0 +1,4 @@
approvers:
- sig-cluster-lifecycle-maintainers
reviewers:
- sig-cluster-lifecycle

445
vendor/k8s.io/kubernetes/test/e2e/lifecycle/addon_update.go generated vendored Normal file

@@ -0,0 +1,445 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"time"
"golang.org/x/crypto/ssh"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// TODO: it would probably be slightly better to build up the objects
// in the code and then serialize to yaml.
var reconcile_addon_controller = `
apiVersion: v1
kind: ReplicationController
metadata:
name: addon-reconcile-test
namespace: %s
labels:
k8s-app: addon-reconcile-test
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 2
selector:
k8s-app: addon-reconcile-test
template:
metadata:
labels:
k8s-app: addon-reconcile-test
spec:
containers:
- image: %s
name: addon-reconcile-test
ports:
- containerPort: 9376
protocol: TCP
`
// Should update "reconcile" class addon.
var reconcile_addon_controller_updated = `
apiVersion: v1
kind: ReplicationController
metadata:
name: addon-reconcile-test
namespace: %s
labels:
k8s-app: addon-reconcile-test
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
newLabel: addon-reconcile-test
spec:
replicas: 2
selector:
k8s-app: addon-reconcile-test
template:
metadata:
labels:
k8s-app: addon-reconcile-test
spec:
containers:
- image: %s
name: addon-reconcile-test
ports:
- containerPort: 9376
protocol: TCP
`
var ensure_exists_addon_service = `
apiVersion: v1
kind: Service
metadata:
name: addon-ensure-exists-test
namespace: %s
labels:
k8s-app: addon-ensure-exists-test
addonmanager.kubernetes.io/mode: EnsureExists
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-ensure-exists-test
`
// Should create but not update "ensure exist" class addon.
var ensure_exists_addon_service_updated = `
apiVersion: v1
kind: Service
metadata:
name: addon-ensure-exists-test
namespace: %s
labels:
k8s-app: addon-ensure-exists-test
addonmanager.kubernetes.io/mode: EnsureExists
newLabel: addon-ensure-exists-test
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-ensure-exists-test
`
var deprecated_label_addon_service = `
apiVersion: v1
kind: Service
metadata:
name: addon-deprecated-label-test
namespace: %s
labels:
k8s-app: addon-deprecated-label-test
kubernetes.io/cluster-service: "true"
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-deprecated-label-test
`
// Should update addon with label "kubernetes.io/cluster-service=true".
var deprecated_label_addon_service_updated = `
apiVersion: v1
kind: Service
metadata:
name: addon-deprecated-label-test
namespace: %s
labels:
k8s-app: addon-deprecated-label-test
kubernetes.io/cluster-service: "true"
newLabel: addon-deprecated-label-test
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-deprecated-label-test
`
// Should not create addon without valid label.
var invalid_addon_controller = `
apiVersion: v1
kind: ReplicationController
metadata:
name: invalid-addon-test
namespace: %s
labels:
k8s-app: invalid-addon-test
addonmanager.kubernetes.io/mode: NotMatch
spec:
replicas: 2
selector:
k8s-app: invalid-addon-test
template:
metadata:
labels:
k8s-app: invalid-addon-test
spec:
containers:
- image: %s
name: invalid-addon-test
ports:
- containerPort: 9376
protocol: TCP
`
const (
addonTestPollInterval = 3 * time.Second
addonTestPollTimeout = 5 * time.Minute
addonNsName = metav1.NamespaceSystem
)
var serveHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname)
type stringPair struct {
data, fileName string
}
var _ = SIGDescribe("Addon update", func() {
var dir string
var sshClient *ssh.Client
f := framework.NewDefaultFramework("addon-update-test")
BeforeEach(func() {
// This test requires:
// - SSH master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
if !framework.ProviderIs("gce") {
return
}
var err error
sshClient, err = getMasterSSHClient()
Expect(err).NotTo(HaveOccurred(), "Failed to get the master SSH client.")
})
AfterEach(func() {
if sshClient != nil {
sshClient.Close()
}
})
// WARNING: the test is not parallel-friendly!
It("should propagate add-on file changes [Slow]", func() {
// This test requires:
// - SSH
// - master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")
// These tests are long, so several cases are squeezed into one scenario.
Expect(sshClient).NotTo(BeNil())
dir = f.Namespace.Name // we use it only to give a unique string for each test execution
temporaryRemotePathPrefix := "addon-test-dir"
temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master
defer sshExec(sshClient, fmt.Sprintf("rm -rf %s", temporaryRemotePathPrefix)) // ignore the result in cleanup
sshExecAndVerify(sshClient, fmt.Sprintf("mkdir -p %s", temporaryRemotePath))
rcAddonReconcile := "addon-reconcile-controller.yaml"
rcAddonReconcileUpdated := "addon-reconcile-controller-Updated.yaml"
rcInvalid := "invalid-addon-controller.yaml"
svcAddonDeprecatedLabel := "addon-deprecated-label-service.yaml"
svcAddonDeprecatedLabelUpdated := "addon-deprecated-label-service-updated.yaml"
svcAddonEnsureExists := "addon-ensure-exists-service.yaml"
svcAddonEnsureExistsUpdated := "addon-ensure-exists-service-updated.yaml"
remoteFiles := []stringPair{
{fmt.Sprintf(reconcile_addon_controller, addonNsName, serveHostnameImage), rcAddonReconcile},
{fmt.Sprintf(reconcile_addon_controller_updated, addonNsName, serveHostnameImage), rcAddonReconcileUpdated},
{fmt.Sprintf(deprecated_label_addon_service, addonNsName), svcAddonDeprecatedLabel},
{fmt.Sprintf(deprecated_label_addon_service_updated, addonNsName), svcAddonDeprecatedLabelUpdated},
{fmt.Sprintf(ensure_exists_addon_service, addonNsName), svcAddonEnsureExists},
{fmt.Sprintf(ensure_exists_addon_service_updated, addonNsName), svcAddonEnsureExistsUpdated},
{fmt.Sprintf(invalid_addon_controller, addonNsName, serveHostnameImage), rcInvalid},
}
for _, p := range remoteFiles {
err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
Expect(err).NotTo(HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
}
// directory on kubernetes-master
destinationDirPrefix := "/etc/kubernetes/addons/addon-test-dir"
destinationDir := destinationDirPrefix + "/" + dir
// cleanup from previous tests
_, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
Expect(err).NotTo(HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))
By("copy invalid manifests to the destination dir")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid))
// we will verify at the end of the test that the objects weren't created from the invalid manifests
By("copy new manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcile, destinationDir, rcAddonReconcile))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabel, destinationDir, svcAddonDeprecatedLabel))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
// Delete the "ensure exist class" addon at the end.
defer func() {
framework.Logf("Cleaning up ensure exist class addon.")
Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(HaveOccurred())
}()
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true)
// Replace the manifests with new contents.
By("update manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcileUpdated, destinationDir, rcAddonReconcile))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabelUpdated, destinationDir, svcAddonDeprecatedLabel))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExistsUpdated, destinationDir, svcAddonEnsureExists))
// Wait for updated addons to have the new added label.
reconcileSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-reconcile-test"}))
waitForReplicationControllerwithSelectorInAddonTest(f.ClientSet, addonNsName, true, reconcileSelector)
deprecatedLabelSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-deprecated-label-test"}))
waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, true, deprecatedLabelSelector)
// "Ensure exist class" addon should not be updated.
ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"}))
waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, false, ensureExistSelector)
By("remove manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists))
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", false)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", false)
// "Ensure exist class" addon will not be deleted when manifest is removed.
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true)
By("verify invalid addons weren't created")
_, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{})
Expect(err).To(HaveOccurred())
// Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function.
})
})
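// The helpers below wrap the framework wait functions with the addon test's poll interval and timeout.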
func waitForServiceInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) {
framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) {
framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
func waitForServicewithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) {
framework.ExpectNoError(framework.WaitForServiceWithSelector(c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout))
}
func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) {
framework.ExpectNoError(framework.WaitForReplicationControllerwithSelector(c, addonNamespace, selector, exist, addonTestPollInterval,
addonTestPollTimeout))
}
// TODO use the framework.SSH code, either adding an SCP to it or copying files
// differently.
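// getMasterSSHClient dials the master on port 22 and returns an SSH client authenticated with the provider's signer.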
func getMasterSSHClient() (*ssh.Client, error) {
// Get a signer for the provider.
signer, err := framework.GetSigner(framework.TestContext.Provider)
if err != nil {
return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
}
sshUser := os.Getenv("KUBE_SSH_USER")
if sshUser == "" {
sshUser = os.Getenv("USER")
}
config := &ssh.ClientConfig{
User: sshUser,
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
host := framework.GetMasterHost() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
}
return client, err
}
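// sshExecAndVerify runs cmd over SSH and fails the test on a transport error or a nonzero exit code.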
func sshExecAndVerify(client *ssh.Client, cmd string) {
_, _, rc, err := sshExec(client, cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
Expect(rc).To(Equal(0), "error return code from executing command on the cluster: %s", cmd)
}
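// sshExec runs cmd in a fresh SSH session and returns stdout, stderr, the exit code, and any transport-level error.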
func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
}
defer session.Close()
// Run the command.
code := 0
var bout, berr bytes.Buffer
session.Stdout, session.Stderr = &bout, &berr
err = session.Run(cmd)
if err != nil {
// Check whether the command failed to run or didn't complete.
if exiterr, ok := err.(*ssh.ExitError); ok {
// If we got an ExitError and the exit code is nonzero, we'll
// consider the SSH itself successful (just that the command run
// errored on the host).
if code = exiterr.ExitStatus(); code != 0 {
err = nil
}
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), err)
}
}
return bout.String(), berr.String(), code, err
}
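// writeRemoteFile streams data to dir/fileName on the remote host via the scp sink protocol (scp -t).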
func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
}
defer session.Close()
fileSize := len(data)
pipe, err := session.StdinPipe()
if err != nil {
return err
}
defer pipe.Close()
if err := session.Start(fmt.Sprintf("scp -t %s", dir)); err != nil {
return err
}
fmt.Fprintf(pipe, "C%#o %d %s\n", mode, fileSize, fileName)
io.Copy(pipe, strings.NewReader(data))
fmt.Fprint(pipe, "\x00")
pipe.Close()
return session.Wait()
}

41
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/BUILD generated vendored Normal file

@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"bootstrap_signer.go",
"bootstrap_token_cleaner.go",
"util.go",
],
importpath = "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap",
deps = [
"//pkg/bootstrap/api:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/lifecycle:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

10
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/OWNERS generated vendored Normal file

@@ -0,0 +1,10 @@
approvers: #sig-cluster-lifecycle is the owner of this feature
- jbeda
- luxas
- wanghaoran1988
reviewers:
- mikedanese
- luxas
- dmmcquay
- krousey
- wanghaoran1988

124
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/bootstrap_signer.go generated vendored Normal file

@@ -0,0 +1,124 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/lifecycle"
)
const (
TokenIDBytes = 3
TokenSecretBytes = 8
)
var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
var c clientset.Interface
f := framework.NewDefaultFramework("bootstrap-signer")
AfterEach(func() {
if len(secretNeedClean) > 0 {
By("delete the bootstrap token secret")
err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
secretNeedClean = ""
}
})
BeforeEach(func() {
c = f.ClientSet
})
It("should sign the new added bootstrap tokens", func() {
By("create a new bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
secret := newTokenSecret(tokenId, "tokenSecret")
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
Expect(err).NotTo(HaveOccurred())
By("wait for the bootstrap token secret be signed")
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
Expect(err).NotTo(HaveOccurred())
})
It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func() {
By("create a new bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
secret := newTokenSecret(tokenId, "tokenSecret")
secret, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
Expect(err).NotTo(HaveOccurred())
By("wait for the bootstrap token secret to be signed")
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
Expect(err).NotTo(HaveOccurred())
cfgMap, err := f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
signedToken, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenId]
Expect(ok).Should(Equal(true))
By("update the cluster-info ConfigMap")
originalData := cfgMap.Data[bootstrapapi.KubeConfigKey]
updatedKubeConfig, err := randBytes(20)
Expect(err).NotTo(HaveOccurred())
cfgMap.Data[bootstrapapi.KubeConfigKey] = updatedKubeConfig
_, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("update back the cluster-info ConfigMap")
cfgMap, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
cfgMap.Data[bootstrapapi.KubeConfigKey] = originalData
_, err = f.ClientSet.CoreV1().ConfigMaps(metav1.NamespacePublic).Update(cfgMap)
Expect(err).NotTo(HaveOccurred())
}()
By("wait for signed bootstrap token updated")
err = WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c, tokenId, signedToken)
Expect(err).NotTo(HaveOccurred())
})
It("should delete the signed bootstrap tokens from clusterInfo ConfigMap when bootstrap token is deleted", func() {
By("create a new bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
secret := newTokenSecret(tokenId, "tokenSecret")
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
Expect(err).NotTo(HaveOccurred())
By("wait for the bootstrap secret be signed")
err = WaitforSignedClusterInfoByBootStrapToken(c, tokenId)
Expect(err).NotTo(HaveOccurred())
By("delete the bootstrap token secret")
err = c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(bootstrapapi.BootstrapTokenSecretPrefix+tokenId, &metav1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
By("wait for the bootstrap token removed from cluster-info ConfigMap")
err = WaitForSignedClusterInfoByBootstrapTokenToDisappear(c, tokenId)
Expect(err).NotTo(HaveOccurred())
})
})

85
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go generated vendored Normal file

@@ -0,0 +1,85 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/lifecycle"
)
var secretNeedClean string
var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
var c clientset.Interface
f := framework.NewDefaultFramework("bootstrap-token-cleaner")
BeforeEach(func() {
c = f.ClientSet
})
AfterEach(func() {
if len(secretNeedClean) > 0 {
By("delete the bootstrap token secret")
err := c.CoreV1().Secrets(metav1.NamespaceSystem).Delete(secretNeedClean, &metav1.DeleteOptions{})
secretNeedClean = ""
Expect(err).NotTo(HaveOccurred())
}
})
It("should delete the token secret when the secret expired", func() {
By("create a new expired bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
tokenSecret, err := GenerateTokenSecret()
Expect(err).NotTo(HaveOccurred())
secret := newTokenSecret(tokenId, tokenSecret)
addSecretExpiration(secret, TimeStringFromNow(-time.Hour))
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
Expect(err).NotTo(HaveOccurred())
By("wait for the bootstrap token secret be deleted")
err = WaitForBootstrapTokenSecretToDisappear(c, tokenId)
Expect(err).NotTo(HaveOccurred())
})
It("should not delete the token secret when the secret is not expired", func() {
By("create a new expired bootstrap token secret")
tokenId, err := GenerateTokenId()
Expect(err).NotTo(HaveOccurred())
tokenSecret, err := GenerateTokenSecret()
Expect(err).NotTo(HaveOccurred())
secret := newTokenSecret(tokenId, tokenSecret)
addSecretExpiration(secret, TimeStringFromNow(time.Hour))
_, err = c.CoreV1().Secrets(metav1.NamespaceSystem).Create(secret)
secretNeedClean = bootstrapapi.BootstrapTokenSecretPrefix + tokenId
Expect(err).NotTo(HaveOccurred())
By("wait for the bootstrap token secret not be deleted")
err = WaitForBootstrapTokenSecretNotDisappear(c, tokenId, 20*time.Second)
Expect(err).NotTo(HaveOccurred())
})
})

155
vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/util.go generated vendored Normal file

@@ -0,0 +1,155 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bootstrap
import (
"crypto/rand"
"encoding/hex"
"errors"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
"k8s.io/kubernetes/test/e2e/framework"
)
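// newTokenSecret builds a bootstrap token Secret in kube-system with the usage-signing bit set.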
func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: metav1.NamespaceSystem,
Name: bootstrapapi.BootstrapTokenSecretPrefix + tokenID,
},
Type: bootstrapapi.SecretTypeBootstrapToken,
Data: map[string][]byte{
bootstrapapi.BootstrapTokenIDKey: []byte(tokenID),
bootstrapapi.BootstrapTokenSecretKey: []byte(tokenSecret),
bootstrapapi.BootstrapTokenUsageSigningKey: []byte("true"),
},
}
}
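// GenerateTokenId returns a random, hex-encoded token ID of TokenIDBytes bytes.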
func GenerateTokenId() (string, error) {
tokenID, err := randBytes(TokenIDBytes)
if err != nil {
return "", err
}
return tokenID, nil
}
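// GenerateTokenSecret returns a random, hex-encoded token secret of TokenSecretBytes bytes.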
func GenerateTokenSecret() (string, error) {
tokenSecret, err := randBytes(TokenSecretBytes)
if err != nil {
return "", err
}
return tokenSecret, err
}
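// randBytes returns length cryptographically secure random bytes as a hex string.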
func randBytes(length int) (string, error) {
b := make([]byte, length)
_, err := rand.Read(b)
if err != nil {
return "", err
}
return hex.EncodeToString(b), nil
}
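// addSecretExpiration sets the token's expiration field to the given RFC3339 timestamp.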
func addSecretExpiration(s *v1.Secret, expiration string) {
s.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(expiration)
}
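// TimeStringFromNow returns now+delta formatted as RFC3339.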
func TimeStringFromNow(delta time.Duration) string {
return time.Now().Add(delta).Format(time.RFC3339)
}
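// WaitforSignedClusterInfoByBootStrapToken polls the cluster-info ConfigMap until it contains a JWS signature for the given token ID.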
func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID string) error {
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get cluster-info configMap: %v", err)
return false, err
}
_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
if !ok {
return false, nil
}
return true, nil
})
}
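// WaitForSignedClusterInfoGetUpdatedByBootstrapToken polls until the JWS signature for the token ID differs from signedToken.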
func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, tokenID string, signedToken string) error {
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get cluster-info configMap: %v", err)
return false, err
}
updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
if !ok || updated == signedToken {
return false, nil
}
return true, nil
})
}
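// WaitForSignedClusterInfoByBootstrapTokenToDisappear polls until the JWS signature for the token ID is removed from the cluster-info ConfigMap.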
func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface, tokenID string) error {
return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get cluster-info configMap: %v", err)
return false, err
}
_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
if ok {
return false, nil
}
return true, nil
})
}
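// WaitForBootstrapTokenSecretToDisappear polls until the bootstrap token secret is deleted.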
func WaitForBootstrapTokenSecretToDisappear(c clientset.Interface, tokenID string) error {
return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
_, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
return true, nil
}
return false, nil
})
}
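// WaitForBootstrapTokenSecretNotDisappear polls for duration t and returns nil only if the secret still exists when the timeout expires; it returns an error if the secret was deleted early.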
func WaitForBootstrapTokenSecretNotDisappear(c clientset.Interface, tokenID string, t time.Duration) error {
err := wait.Poll(framework.Poll, t, func() (bool, error) {
secret, err := c.CoreV1().Secrets(metav1.NamespaceSystem).Get(bootstrapapi.BootstrapTokenSecretPrefix+tokenID, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
return true, errors.New("secret not exists")
}
if secret != nil {
return false, nil
}
return true, err
})
if err == wait.ErrWaitTimeout {
return nil
}
return err
}

451
vendor/k8s.io/kubernetes/test/e2e/lifecycle/cluster_upgrade.go generated vendored Normal file

@@ -0,0 +1,451 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"encoding/xml"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
"k8s.io/client-go/discovery"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
"k8s.io/kubernetes/test/e2e/upgrades"
apps "k8s.io/kubernetes/test/e2e/upgrades/apps"
"k8s.io/kubernetes/test/e2e/upgrades/storage"
"k8s.io/kubernetes/test/utils/junit"
. "github.com/onsi/ginkgo"
)
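// upgradeTests is the default set of workloads exercised while the control plane and/or nodes are upgraded.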
var upgradeTests = []upgrades.Test{
&upgrades.ServiceUpgradeTest{},
&upgrades.SecretUpgradeTest{},
&apps.ReplicaSetUpgradeTest{},
&apps.StatefulSetUpgradeTest{},
&apps.DeploymentUpgradeTest{},
&apps.JobUpgradeTest{},
&upgrades.ConfigMapUpgradeTest{},
&upgrades.HPAUpgradeTest{},
&storage.PersistentVolumeUpgradeTest{},
&apps.DaemonSetUpgradeTest{},
&upgrades.IngressUpgradeTest{},
&upgrades.AppArmorUpgradeTest{},
}
var statefulsetUpgradeTests = []upgrades.Test{
&upgrades.MySqlUpgradeTest{},
&upgrades.EtcdUpgradeTest{},
&upgrades.CassandraUpgradeTest{},
}
var kubeProxyUpgradeTests = []upgrades.Test{
&upgrades.KubeProxyUpgradeTest{},
&upgrades.ServiceUpgradeTest{},
&upgrades.IngressUpgradeTest{},
}
var kubeProxyDowngradeTests = []upgrades.Test{
&upgrades.KubeProxyDowngradeTest{},
&upgrades.ServiceUpgradeTest{},
&upgrades.IngressUpgradeTest{},
}
var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
f := framework.NewDefaultFramework("cluster-upgrade")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(upgradeTests)
Describe("master upgrade", func() {
It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Master upgrade"}
masterUpgradeTest := &junit.TestCase{
Name: "[sig-cluster-lifecycle] master-upgrade",
Classname: "upgrade_tests",
}
testSuite.TestCases = append(testSuite.TestCases, masterUpgradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, masterUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.MasterUpgrade, upgradeFunc)
})
})
Describe("node upgrade", func() {
It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Node upgrade"}
nodeUpgradeTest := &junit.TestCase{
Name: "node-upgrade",
Classname: "upgrade_tests",
}
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, nodeUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.NodeUpgrade, upgradeFunc)
})
})
Describe("cluster upgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
clusterUpgradeTest := &junit.TestCase{Name: "[sig-cluster-lifecycle] cluster-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, clusterUpgradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, clusterUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
f := framework.NewDefaultFramework("cluster-downgrade")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(upgradeTests)
Describe("cluster downgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
clusterDowngradeTest := &junit.TestCase{Name: "[sig-cluster-lifecycle] cluster-downgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, clusterDowngradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, clusterDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
var _ = SIGDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
f := framework.NewDefaultFramework("etc-upgrade")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(upgradeTests)
Describe("etcd upgrade", func() {
It("should maintain a functioning cluster", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Etcd upgrade"}
etcdTest := &junit.TestCase{Name: "[sig-cluster-lifecycle] etcd-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, etcdTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, etcdTest)
framework.ExpectNoError(framework.EtcdUpgrade(framework.TestContext.EtcdUpgradeStorage, framework.TestContext.EtcdUpgradeVersion))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.EtcdUpgrade, upgradeFunc)
})
})
})
var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func() {
f := framework.NewDefaultFramework("stateful-upgrade")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks(statefulsetUpgradeTests)
framework.KubeDescribe("stateful upgrade", func() {
It("should maintain a functioning cluster", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Stateful upgrade"}
statefulUpgradeTest := &junit.TestCase{Name: "[sig-apps] stateful-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, statefulUpgradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, statefulUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(target))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]", func() {
f := framework.NewDefaultFramework("kube-proxy-ds-migration")
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
})
Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() {
testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)
It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "kube-proxy upgrade"}
kubeProxyUpgradeTest := &junit.TestCase{
Name: "kube-proxy-ds-upgrade",
Classname: "upgrade_tests",
}
testSuite.TestCases = append(testSuite.TestCases, kubeProxyUpgradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, kubeProxyUpgradeTest)
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, true))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() {
testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)
It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "kube-proxy downgrade"}
kubeProxyDowngradeTest := &junit.TestCase{
Name: "kube-proxy-ds-downgrade",
Classname: "upgrade_tests",
}
testSuite.TestCases = append(testSuite.TestCases, kubeProxyDowngradeTest)
upgradeFunc := func() {
start := time.Now()
defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, false))
framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
}
runUpgradeSuite(f, kubeProxyDowngradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
})
})
})
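// chaosMonkeyAdapter adapts a single upgrade test to the chaosmonkey interface and records its outcome as a JUnit test case.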
type chaosMonkeyAdapter struct {
test upgrades.Test
testReport *junit.TestCase
framework *framework.Framework
upgradeType upgrades.UpgradeType
upgCtx upgrades.UpgradeContext
}
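// Test runs the adapted upgrade test: it signals readiness to chaosmonkey, honors skips, and always records the result via finalizeUpgradeTest.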
func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) {
start := time.Now()
var once sync.Once
ready := func() {
once.Do(func() {
sem.Ready()
})
}
defer finalizeUpgradeTest(start, cma.testReport)
defer ready()
if skippable, ok := cma.test.(upgrades.Skippable); ok && skippable.Skip(cma.upgCtx) {
By("skipping test " + cma.test.Name())
cma.testReport.Skipped = "skipping test " + cma.test.Name()
return
}
defer cma.test.Teardown(cma.framework)
cma.test.Setup(cma.framework)
ready()
cma.test.Test(cma.framework, sem.StopCh, cma.upgradeType)
}
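// finalizeUpgradeTest records the test duration and converts a recovered panic into a JUnit failure, skip, or error entry.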
func finalizeUpgradeTest(start time.Time, tc *junit.TestCase) {
tc.Time = time.Since(start).Seconds()
r := recover()
if r == nil {
return
}
switch r := r.(type) {
case ginkgowrapper.FailurePanic:
tc.Failures = []*junit.Failure{
{
Message: r.Message,
Type: "Failure",
Value: fmt.Sprintf("%s\n\n%s", r.Message, r.FullStackTrace),
},
}
case ginkgowrapper.SkipPanic:
tc.Skipped = fmt.Sprintf("%s:%d %q", r.Filename, r.Line, r.Message)
default:
tc.Errors = []*junit.Error{
{
Message: fmt.Sprintf("%v", r),
Type: "Panic",
Value: fmt.Sprintf("%v", r),
},
}
}
}
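// createUpgradeFrameworks creates one framework per upgrade test, deriving a valid namespace name from the test name.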
func createUpgradeFrameworks(tests []upgrades.Test) map[string]*framework.Framework {
nsFilter := regexp.MustCompile("[^[:word:]-]+") // match anything that's not a word character or hyphen
testFrameworks := map[string]*framework.Framework{}
for _, t := range tests {
ns := nsFilter.ReplaceAllString(t.Name(), "-") // and replace with a single hyphen
ns = strings.Trim(ns, "-")
testFrameworks[t.Name()] = framework.NewDefaultFramework(ns)
}
return testFrameworks
}
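// runUpgradeSuite registers every upgrade test with chaosmonkey, runs them around upgradeFunc, and writes a JUnit report if a report directory is configured.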
func runUpgradeSuite(
f *framework.Framework,
tests []upgrades.Test,
testFrameworks map[string]*framework.Framework,
testSuite *junit.TestSuite,
upgCtx *upgrades.UpgradeContext,
upgradeType upgrades.UpgradeType,
upgradeFunc func(),
) {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
cm := chaosmonkey.New(upgradeFunc)
for _, t := range tests {
testCase := &junit.TestCase{
Name: t.Name(),
Classname: "upgrade_tests",
}
testSuite.TestCases = append(testSuite.TestCases, testCase)
cma := chaosMonkeyAdapter{
test: t,
testReport: testCase,
framework: testFrameworks[t.Name()],
upgradeType: upgradeType,
upgCtx: *upgCtx,
}
cm.Register(cma.Test)
}
start := time.Now()
defer func() {
testSuite.Update()
testSuite.Time = time.Since(start).Seconds()
if framework.TestContext.ReportDir != "" {
fname := filepath.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%supgrades.xml", framework.TestContext.ReportPrefix))
f, err := os.Create(fname)
if err != nil {
return
}
defer f.Close()
xml.NewEncoder(f).Encode(testSuite)
}
}()
cm.Do()
}
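// getUpgradeContext returns an upgrade context holding the current server version and, when upgradeTarget is set, the resolved target version.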
func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*upgrades.UpgradeContext, error) {
current, err := c.ServerVersion()
if err != nil {
return nil, err
}
curVer, err := version.ParseSemantic(current.String())
if err != nil {
return nil, err
}
upgCtx := &upgrades.UpgradeContext{
Versions: []upgrades.VersionContext{
{
Version: *curVer,
NodeImage: framework.TestContext.NodeOSDistro,
},
},
}
if len(upgradeTarget) == 0 {
return upgCtx, nil
}
next, err := framework.RealVersion(upgradeTarget)
if err != nil {
return nil, err
}
nextVer, err := version.ParseSemantic(next)
if err != nil {
return nil, err
}
upgCtx.Versions = append(upgCtx.Versions, upgrades.VersionContext{
Version: *nextVer,
NodeImage: framework.TestContext.UpgradeImage,
})
return upgCtx, nil
}

23
vendor/k8s.io/kubernetes/test/e2e/lifecycle/framework.go generated vendored Normal file

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-cluster-lifecycle] "+text, body)
}

241
vendor/k8s.io/kubernetes/test/e2e/lifecycle/ha_master.go generated vendored Normal file

@@ -0,0 +1,241 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"fmt"
"os/exec"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
)
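// The helpers below shell out to the e2e grow/shrink cluster scripts to add or remove master replicas and worker nodes in a given zone.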
func addMasterReplica(zone string) error {
framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
if err != nil {
return err
}
return nil
}
func removeMasterReplica(zone string) error {
framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
if err != nil {
return err
}
return nil
}
func addWorkerNodes(zone string) error {
framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
if err != nil {
return err
}
return nil
}
func removeWorkerNodes(zone string) error {
framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
if err != nil {
return err
}
return nil
}
func verifyRCs(c clientset.Interface, ns string, names []string) {
for _, name := range names {
framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1))
}
}
func createNewRC(c clientset.Interface, ns string, name string) {
_, err := common.NewRCByName(c, ns, name, 1, nil)
framework.ExpectNoError(err)
}
func findRegionForZone(zone string) string {
region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").CombinedOutput()
framework.ExpectNoError(err)
if string(region) == "" {
framework.Failf("Region not found; zone: %s", zone)
}
return string(region)
}
func findZonesForRegion(region string) []string {
output, err := exec.Command("gcloud", "compute", "zones", "list", "--filter=region="+region,
"--quiet", "--format=csv[no-heading](name)").CombinedOutput()
framework.ExpectNoError(err)
zones := strings.Split(strings.TrimSpace(string(output)), "\n") // trim to avoid a trailing empty entry
return zones
}
// removeZoneFromZones removes zone from the zones slice.
// Note that entries in zones can be repeated; in such a situation only one occurrence is removed.
func removeZoneFromZones(zones []string, zone string) []string {
idx := -1
for j, z := range zones {
if z == zone {
idx = j
break
}
}
if idx >= 0 {
return zones[:idx+copy(zones[idx:], zones[idx+1:])]
}
return zones
}
var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
f := framework.NewDefaultFramework("ha-master")
var c clientset.Interface
var ns string
var additionalReplicaZones []string
var additionalNodesZones []string
var existingRCs []string
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
additionalReplicaZones = make([]string, 0)
existingRCs = make([]string, 0)
})
AfterEach(func() {
// Clean-up additional worker nodes if the test execution was broken.
for _, zone := range additionalNodesZones {
removeWorkerNodes(zone)
}
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
// Clean-up additional master replicas if the test execution was broken.
for _, zone := range additionalReplicaZones {
removeMasterReplica(zone)
}
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
})
type Action int
const (
None Action = iota
AddReplica
RemoveReplica
AddNodes
RemoveNodes
)
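// step performs the given action in the given zone, waits for the expected number of masters and for all nodes to be ready, then verifies the API server by creating a new RC and checking all RCs created so far.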
step := func(action Action, zone string) {
switch action {
case None:
case AddReplica:
framework.ExpectNoError(addMasterReplica(zone))
additionalReplicaZones = append(additionalReplicaZones, zone)
case RemoveReplica:
framework.ExpectNoError(removeMasterReplica(zone))
additionalReplicaZones = removeZoneFromZones(additionalReplicaZones, zone)
case AddNodes:
framework.ExpectNoError(addWorkerNodes(zone))
additionalNodesZones = append(additionalNodesZones, zone)
case RemoveNodes:
framework.ExpectNoError(removeWorkerNodes(zone))
additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
}
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
// Verify that API server works correctly with HA master.
rcName := "ha-master-" + strconv.Itoa(len(existingRCs))
createNewRC(c, ns, rcName)
existingRCs = append(existingRCs, rcName)
verifyRCs(c, ns, existingRCs)
}
It("survive addition/removal replicas same zone [Serial][Disruptive]", func() {
zone := framework.TestContext.CloudConfig.Zone
step(None, "")
numAdditionalReplicas := 2
for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zone)
}
for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zone)
}
})
It("survive addition/removal replicas different zones [Serial][Disruptive]", func() {
zone := framework.TestContext.CloudConfig.Zone
region := findRegionForZone(zone)
zones := findZonesForRegion(region)
zones = removeZoneFromZones(zones, zone)
step(None, "")
// If numAdditionalReplicas is larger than the number of remaining zones in the region,
// we create a few masters in the same zone and the zone entry is repeated in additionalReplicaZones.
numAdditionalReplicas := 2
for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zones[i%len(zones)])
}
for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zones[i%len(zones)])
}
})
It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func() {
zone := framework.TestContext.CloudConfig.Zone
region := findRegionForZone(zone)
zones := findZonesForRegion(region)
zones = removeZoneFromZones(zones, zone)
step(None, "")
numAdditionalReplicas := 2
// Add worker nodes.
for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
step(AddNodes, zones[i])
}
// Add master replicas.
//
// If numAdditionalReplicas is larger than the number of remaining zones in the region,
// we create a few masters in the same zone and the zone entry is repeated in additionalReplicaZones.
for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zones[i%len(zones)])
}
// Remove master replicas.
for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zones[i%len(zones)])
}
// Remove worker nodes.
for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
step(RemoveNodes, zones[i])
}
})
})

166
vendor/k8s.io/kubernetes/test/e2e/lifecycle/node_auto_repairs.go generated vendored Normal file

@@ -0,0 +1,166 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"fmt"
"os/exec"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
repairTimeout = 20 * time.Minute
)
var _ = SIGDescribe("Node Auto Repairs [Slow] [Disruptive]", func() {
f := framework.NewDefaultFramework("lifecycle")
var c clientset.Interface
var originalNodes map[string]string
var nodeCount int
BeforeEach(func() {
framework.SkipUnlessProviderIs("gke")
c = f.ClientSet
nodeCount = 0
originalNodes = make(map[string]string)
for _, groupName := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
glog.Infof("Processing group %s", groupName)
nodes, err := framework.GetGroupNodes(groupName)
framework.ExpectNoError(err)
for _, node := range nodes {
nodeReady, err := isNodeReady(c, node)
framework.ExpectNoError(err)
Expect(nodeReady).To(Equal(true))
originalNodes[groupName] = node
nodeCount++
}
}
glog.Infof("Number of nodes %d", nodeCount)
})
AfterEach(func() {
framework.SkipUnlessProviderIs("gke")
By(fmt.Sprintf("Restoring initial size of the cluster"))
for groupName, nodeName := range originalNodes {
nodeReady, err := isNodeReady(c, nodeName)
framework.ExpectNoError(err)
if !nodeReady {
framework.ExpectNoError(recreateNode(nodeName, groupName))
}
}
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, defaultTimeout))
})
It("should repair node [Feature:NodeAutoRepairs]", func() {
framework.SkipUnlessProviderIs("gke")
framework.ExpectNoError(enableAutoRepair("default-pool"))
defer disableAutoRepair("default-pool")
readyNodes := getReadyNodes(c)
Expect(len(readyNodes)).NotTo(Equal(0))
nodeName := readyNodes[0].Name
framework.ExpectNoError(stopKubeletOnNode(nodeName))
By("Wait till node is unready.")
Expect(framework.WaitForNodeToBeNotReady(c, nodeName, defaultTimeout)).To(Equal(true))
By("Wait till node is repaired.")
Expect(framework.WaitForNodeToBeReady(c, nodeName, repairTimeout)).To(Equal(true))
})
})
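// execCmd logs the command line and returns an exec.Cmd for it.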
func execCmd(args ...string) *exec.Cmd {
glog.Infof("Executing: %s", strings.Join(args, " "))
return exec.Command(args[0], args[1:]...)
}
func getReadyNodes(c clientset.Interface) []v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(c)
return nodeList.Items
}
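// enableAutoRepair and disableAutoRepair toggle GKE node auto-repair for a node pool via gcloud.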
func enableAutoRepair(nodePool string) error {
glog.Infof("Using gcloud to enable auto repair for pool %s", nodePool)
output, err := execCmd("gcloud", "beta", "container", "node-pools", "update", nodePool,
"--enable-autorepair",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
if err != nil {
glog.Errorf("Failed to enable auto repair: %s", string(output))
return fmt.Errorf("failed to enable auto repair: %v", err)
}
return nil
}
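// disableAutoRepair turns GKE node auto-repair back off for the given node pool.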
func disableAutoRepair(nodePool string) error {
glog.Infof("Using gcloud to disable auto repair for pool %s", nodePool)
output, err := execCmd("gcloud", "beta", "container", "node-pools", "update", nodePool,
"--no-enable-autorepair",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
if err != nil {
glog.Errorf("Failed to disable auto repair: %s", string(output))
return fmt.Errorf("failed to disable auto repair: %v", err)
}
return nil
}
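// stopKubeletOnNode stops the kubelet (and the monitor that would restart it)
// on the given node over SSH, which eventually makes the node report NotReady.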
func stopKubeletOnNode(node string) error {
glog.Infof("Using gcloud to stop Kublet on node %s", node)
output, err := execCmd("gcloud", "compute", "ssh", node,
"--command=sudo systemctl stop kubelet-monitor.service && sudo systemctl stop kubelet.service",
"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
if err != nil {
glog.Errorf("Failed to stop Kubelet: %v", string(output))
return fmt.Errorf("failed to stop Kubelet: %v", err)
}
return nil
}
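// recreateNode asks the node's managed instance group to recreate the given
// instance, restoring it to a healthy state.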
func recreateNode(nodeName string, groupName string) error {
glog.Infof("Using gcloud to recreate node %s", nodeName)
// Equivalent invocation: gcloud compute instance-groups managed recreate-instances <group> --instances=<node> --zone=<zone>
output, err := execCmd("gcloud", "compute", "instance-groups", "managed", "recreate-instances", groupName,
"--instances="+nodeName,
"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
if err != nil {
glog.Errorf("Failed to recreate node: %s", string(output))
return fmt.Errorf("failed to recreate node: %v", err)
}
return nil
}
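// isNodeReady reports whether the named node currently has the NodeReady
// condition set to true.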
func isNodeReady(c clientset.Interface, nodeName string) (bool, error) {
glog.Infof("Check if node %s is ready ", nodeName)
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
result := framework.IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
glog.Infof("Node %s is ready: %t", nodeName, result)
return result, nil
}

318
vendor/k8s.io/kubernetes/test/e2e/lifecycle/reboot.go generated vendored Normal file

@ -0,0 +1,318 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"fmt"
"strings"
"sync"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// How long a node is allowed to go from "Ready" to "NotReady" after a
// reboot is issued before the test is considered failed.
rebootNodeNotReadyTimeout = 2 * time.Minute
// How long a node is allowed to go from "NotReady" to "Ready" after a
// reboot is issued and it is found to be "NotReady" before the test is
// considered failed.
rebootNodeReadyAgainTimeout = 5 * time.Minute
// How long pods have to be "ready" after the reboot.
rebootPodReadyAgainTimeout = 5 * time.Minute
)
var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
var f *framework.Framework
BeforeEach(func() {
// These tests require SSH access to the nodes, so the provider check should be
// identical to the one there (the limiting factor is the implementation of
// util.go's framework.GetSigner(...)). The cluster must also support node reboot.
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
})
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
// Most of the reboot tests just make sure that addon/system pods are running, so dump
// events for the kube-system namespace on failures
namespaceName := metav1.NamespaceSystem
By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, e := range events.Items {
framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests
// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
// was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
})
f = framework.NewDefaultFramework("reboot")
It("each node by ordering clean reboot and ensure they function upon restart", func() {
// clean shutdown and restart
// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil)
})
It("each node by ordering unclean reboot and ensure they function upon restart", func() {
// unclean shutdown and restart
// We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
})
It("each node by triggering kernel panic and ensure they function upon restart", func() {
// kernel panic
// We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
})
It("each node by switching off the network interface and ensure they function upon switch on", func() {
// switch the network interface off for a while to simulate a network outage
// We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && (sudo ifdown eth0 || sudo ip link set eth0 down) && sleep 120 && (sudo ifup eth0 || sudo ip link set eth0 up)' >/dev/null 2>&1 &", nil)
})
It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
// tell the firewall to drop all inbound packets for a while
// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
// We still accept packets sent from localhost to prevent monit from restarting the kubelet.
tmpLogPath := "/tmp/drop-inbound.log"
testReboot(f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(tmpLogPath))
})
It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() {
// tell the firewall to drop all outbound packets for a while
// We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
// We still accept packets sent to localhost to prevent monit from restarting the kubelet.
tmpLogPath := "/tmp/drop-outbound.log"
testReboot(f.ClientSet, dropPacketsScript("OUTPUT", tmpLogPath), catLogHook(tmpLogPath))
})
})
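// testReboot reboots every ready schedulable node in parallel using rebootCmd
// and fails the test if any node does not pass rebootNode. If hook is non-nil,
// it is executed against all nodes once the reboots have been attempted.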
func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
// Get all nodes, and kick off the test on each.
nodelist := framework.GetReadySchedulableNodesOrDie(c)
if hook != nil {
defer func() {
framework.Logf("Executing termination hook on nodes")
hook(framework.TestContext.Provider, nodelist)
}()
}
result := make([]bool, len(nodelist.Items))
wg := sync.WaitGroup{}
wg.Add(len(nodelist.Items))
failed := false
for ix := range nodelist.Items {
go func(ix int) {
defer wg.Done()
n := nodelist.Items[ix]
result[ix] = rebootNode(c, framework.TestContext.Provider, n.ObjectMeta.Name, rebootCmd)
if !result[ix] {
failed = true
}
}(ix)
}
// Wait for all to finish and check the final result.
wg.Wait()
if failed {
for ix := range nodelist.Items {
n := nodelist.Items[ix]
if !result[ix] {
framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
}
}
framework.Failf("Test failed; at least one node failed to reboot in the time given.")
}
}
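// printStatusAndLogsForNotReadyPods logs the status and container logs of
// every pod from podNames in ns that is not running and ready (or succeeded).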
func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) {
printFn := func(id, log string, err error, previous bool) {
prefix := "Retrieving log for container"
if previous {
prefix = "Retrieving log for the last terminated container"
}
if err != nil {
framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
} else {
framework.Logf("%s %s:\n%s\n", prefix, id, log)
}
}
podNameSet := sets.NewString(podNames...)
for _, p := range pods {
if p.Namespace != ns {
continue
}
if !podNameSet.Has(p.Name) {
continue
}
if ok, _ := testutils.PodRunningReady(p); ok {
continue
}
framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
// Print the log of the containers if pod is not running and ready.
for _, container := range p.Status.ContainerStatuses {
cIdentifier := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
log, err := framework.GetPodLogs(c, p.Namespace, p.Name, container.Name)
printFn(cIdentifier, log, err, false)
// If the container has restarted, also fetch the log of its previously
// terminated instance.
if container.RestartCount > 0 {
prevLog, prevErr := framework.GetPreviousPodLogs(c, p.Namespace, p.Name, container.Name)
printFn(cIdentifier, prevLog, prevErr, true)
}
}
}
}
// rebootNode takes the named node on the given provider through the following steps using client c:
// - ensures the node is ready
// - ensures all pods on the node are running and ready
// - reboots the node (by executing rebootCmd over ssh)
// - ensures the node reaches some non-ready state
// - ensures the node becomes ready again
// - ensures all pods on the node become running and ready again
//
// It returns true through result only if all of the steps pass; at the first
// failed step, it will return false through result and not run the rest.
func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
// Setup
ns := metav1.NamespaceSystem
ps := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
defer ps.Stop()
// Get the node initially.
framework.Logf("Getting %s", name)
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
return false
}
// Node sanity check: ensure it is "ready".
if !framework.WaitForNodeToBeReady(c, name, framework.NodeReadyInitialTimeout) {
return false
}
// Get all the pods on the node that don't have liveness probe set.
// Liveness probe may cause restart of a pod during node reboot, and the pod may not be running.
pods := ps.List()
podNames := []string{}
for _, p := range pods {
probe := false
for _, c := range p.Spec.Containers {
if c.LivenessProbe != nil {
probe = true
break
}
}
if !probe {
podNames = append(podNames, p.ObjectMeta.Name)
}
}
framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
// For each pod, we do a sanity check to ensure it's running / healthy
// or succeeded now, as that's what we'll be checking later.
if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(c, ns, podNames, pods)
return false
}
// Reboot the node.
if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
framework.Logf("Error while issuing ssh command: %v", err)
return false
}
// Wait for some kind of "not ready" status.
if !framework.WaitForNodeToBeNotReady(c, name, rebootNodeNotReadyTimeout) {
return false
}
// Wait for some kind of "ready" status.
if !framework.WaitForNodeToBeReady(c, name, rebootNodeReadyAgainTimeout) {
return false
}
// Ensure all of the pods that we found on this node before the reboot are
// running / healthy, or succeeded.
if !framework.CheckPodsRunningReadyOrSucceeded(c, ns, podNames, rebootPodReadyAgainTimeout) {
newPods := ps.List()
printStatusAndLogsForNotReadyPods(c, ns, podNames, newPods)
return false
}
framework.Logf("Reboot successful on node %s", name)
return true
}
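// terminationHook runs once per reboot test against the full node list,
// typically to collect per-node log files written by the reboot command.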
type terminationHook func(provider string, nodes *v1.NodeList)
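// catLogHook returns a terminationHook that dumps the contents of logPath on
// every node over SSH and then removes the file.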
func catLogHook(logPath string) terminationHook {
return func(provider string, nodes *v1.NodeList) {
for _, n := range nodes.Items {
cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
if _, err := framework.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
framework.Logf("Error while issuing ssh command: %v", err)
}
}
}
}
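// dropPacketsScript builds a shell command that inserts iptables rules into
// chainName to drop all traffic for two minutes (except loopback traffic, which
// keeps monit from restarting the kubelet), logging its output to logPath.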
func dropPacketsScript(chainName, logPath string) string {
return strings.Replace(fmt.Sprintf(`
nohup sh -c '
set -x
sleep 10
while true; do sudo iptables -I ${CHAIN} 1 -s 127.0.0.1 -j ACCEPT && break; done
while true; do sudo iptables -I ${CHAIN} 2 -j DROP && break; done
date
sleep 120
while true; do sudo iptables -D ${CHAIN} -j DROP && break; done
while true; do sudo iptables -D ${CHAIN} -s 127.0.0.1 -j ACCEPT && break; done
' >%v 2>&1 &
`, logPath), "${CHAIN}", chainName, -1)
}

165
vendor/k8s.io/kubernetes/test/e2e/lifecycle/resize_nodes.go generated vendored Normal file

@ -0,0 +1,165 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const resizeNodeReadyTimeout = 2 * time.Minute
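// resizeRC sets the replica count of the named ReplicationController in ns.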
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
rc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
if err != nil {
return err
}
*(rc.Spec.Replicas) = replicas
_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(rc)
return err
}
var _ = SIGDescribe("Nodes [Disruptive]", func() {
f := framework.NewDefaultFramework("resize-nodes")
var systemPodsNo int32
var c clientset.Interface
var ns string
ignoreLabels := framework.ImagePullerLabels
var group string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
systemPodsNo = int32(len(systemPods))
if strings.Contains(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
framework.Failf("Test does not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} else {
group = framework.TestContext.CloudConfig.NodeInstanceGroup
}
})
// Slow issue #13323 (8 min)
Describe("Resize [Slow]", func() {
var skipped bool
BeforeEach(func() {
skipped = true
framework.SkipUnlessProviderIs("gce", "gke", "aws")
framework.SkipUnlessNodeCountIsAtLeast(2)
skipped = false
})
AfterEach(func() {
if skipped {
return
}
By("restoring the original node instance group size")
if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
// right before a test that tries to get logs, for example, we may get unlucky and try to use a
// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
// being closed, so we sleep.
//
// TODO(cjcullen) reduce this sleep (#19314)
if framework.ProviderIs("gke") {
By("waiting 5 minutes for all dead tunnels to be dropped")
time.Sleep(5 * time.Minute)
}
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
if err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
By("waiting for system pods to successfully restart")
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
By("waiting for image prepulling pods to complete")
framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
})
It("should be able to delete nodes", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
common.NewRCByName(c, ns, name, replicas, nil)
err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
err = framework.ResizeGroup(group, replicas-1)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForGroupSize(group, replicas-1)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReadyNodes(c, int(replicas-1), 10*time.Minute)
Expect(err).NotTo(HaveOccurred())
By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
"the now non-existent node and the RC to recreate it")
time.Sleep(time.Minute)
By("verifying whether the pods from the removed node are recreated")
err = framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred())
})
// TODO: Bug here - testName is not correct
It("should be able to add nodes", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"
common.NewSVCByName(c, ns, name)
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
common.NewRCByName(c, ns, name, replicas, nil)
err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("increasing cluster size to %d", replicas+1))
err = framework.ResizeGroup(group, replicas+1)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForGroupSize(group, replicas+1)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForReadyNodes(c, int(replicas+1), 10*time.Minute)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))
err = resizeRC(c, ns, name, replicas+1)
Expect(err).NotTo(HaveOccurred())
err = framework.VerifyPods(c, ns, name, true, replicas+1)
Expect(err).NotTo(HaveOccurred())
})
})
})

198
vendor/k8s.io/kubernetes/test/e2e/lifecycle/restart.go generated vendored Normal file

@ -0,0 +1,198 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
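// isNotRestartAlwaysMirrorPod reports whether p is a mirror pod whose restart
// policy is something other than Always.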
func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
if !kubepod.IsMirrorPod(p) {
return false
}
return p.Spec.RestartPolicy != v1.RestartPolicyAlways
}
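// filterIrrelevantPods drops pods that cannot be expected to come back after a
// node restart, i.e. mirror pods whose restart policy is not Always.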
func filterIrrelevantPods(pods []*v1.Pod) []*v1.Pod {
var results []*v1.Pod
for _, p := range pods {
if isNotRestartAlwaysMirrorPod(p) {
// Mirror pods with restart policy == Never will not get
// recreated if they are deleted after the pods have
// terminated. For now, we discount such pods.
// https://github.com/kubernetes/kubernetes/issues/34003
continue
}
results = append(results, p)
}
return results
}
var _ = SIGDescribe("Restart [Disruptive]", func() {
f := framework.NewDefaultFramework("restart")
var ps *testutils.PodStore
var originalNodeNames []string
var originalPodNames []string
var numNodes int
var systemNamespace string
BeforeEach(func() {
// This test requires the ability to restart all nodes, so the provider
// check must be identical to that call.
framework.SkipUnlessProviderIs("gce", "gke")
ps = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
numNodes = framework.TestContext.CloudConfig.NumNodes
systemNamespace = metav1.NamespaceSystem
By("ensuring all nodes are ready")
var err error
originalNodeNames, err = framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, numNodes)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Got the following nodes before restart: %v", originalNodeNames)
By("ensuring all pods are running and ready")
allPods := ps.List()
pods := filterIrrelevantPods(allPods)
originalPodNames = make([]string, len(pods))
for i, p := range pods {
originalPodNames[i] = p.ObjectMeta.Name
}
if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods)
framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
}
})
AfterEach(func() {
if ps != nil {
ps.Stop()
}
})
It("should restart all nodes and ensure all nodes and pods recover", func() {
By("restarting all of the nodes")
err := restartNodes(f, originalNodeNames)
Expect(err).NotTo(HaveOccurred())
By("ensuring all nodes are ready after the restart")
nodeNamesAfter, err := framework.CheckNodesReady(f.ClientSet, framework.RestartNodeReadyAgainTimeout, numNodes)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter)
// Make sure that we have the same number of nodes. We're not checking
// that the names match because that's implementation specific.
By("ensuring the same number of nodes exist after the restart")
if len(originalNodeNames) != len(nodeNamesAfter) {
framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
len(originalNodeNames), len(nodeNamesAfter))
}
// Make sure that we have the same number of pods. We're not checking
// that the names match because they are recreated with different names
// across node restarts.
By("ensuring the same number of pods are running and ready after restart")
podCheckStart := time.Now()
podNamesAfter, err := waitForNPods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
Expect(err).NotTo(HaveOccurred())
remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
pods := ps.List()
printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods)
framework.Failf("At least one pod wasn't running and ready after the restart.")
}
})
})
// waitForNPods polls ps until it finds expect pods, returning their names if
// it can do so before timeout.
func waitForNPods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
// Loop until we find expect pods or timeout is passed.
var pods []*v1.Pod
var errLast error
found := wait.Poll(framework.Poll, timeout, func() (bool, error) {
allPods := ps.List()
pods = filterIrrelevantPods(allPods)
if len(pods) != expect {
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
framework.Logf("Error getting pods: %v", errLast)
return false, nil
}
return true, nil
}) == nil
// Extract the names of all found pods.
podNames := make([]string, len(pods))
for i, p := range pods {
podNames[i] = p.ObjectMeta.Name
}
if !found {
return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
expect, timeout, errLast)
}
return podNames, nil
}
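// restartNodes resets the given GCE instances with gcloud and then waits for
// each node's boot ID to change, confirming that the machine actually rebooted.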
func restartNodes(f *framework.Framework, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
node, err := f.ClientSet.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
}
oldBootIDs[name] = node.Status.NodeInfo.BootID
}
// Reboot the nodes.
args := []string{
"compute",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"instances",
"reset",
}
args = append(args, nodeNames...)
args = append(args, fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone))
stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil {
return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
// Wait for their boot IDs to change.
for _, name := range nodeNames {
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
node, err := f.ClientSet.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}
return node.Status.NodeInfo.BootID != oldBootIDs[name], nil
}); err != nil {
return fmt.Errorf("error waiting for node %s boot ID to change: %s", name, err)
}
}
return nil
}