mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 18:53:35 +00:00)

Fresh dep ensure

vendor/k8s.io/kubernetes/test/e2e/lifecycle/BUILD (generated, vendored; 21 lines changed)
@@ -12,6 +12,8 @@ go_library(
         "cluster_upgrade.go",
         "framework.go",
         "ha_master.go",
+        "kubelet_security.go",
+        "node_lease.go",
         "reboot.go",
         "resize_nodes.go",
         "restart.go",
@@ -20,7 +22,16 @@ go_library(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/kubelet/pod:go_default_library",
-        "//pkg/util/version:go_default_library",
+        "//pkg/master/ports:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/discovery:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/chaosmonkey:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
@@ -34,14 +45,6 @@ go_library(
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/golang.org/x/crypto/ssh:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/discovery:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
     ],
 )


vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/BUILD (generated, vendored; 12 lines changed)
@@ -14,16 +14,16 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/e2e/lifecycle/bootstrap",
     deps = [
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/lifecycle:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/tools/bootstrap/token/api:go_default_library",
     ],
 )


vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/bootstrap_signer.go (generated, vendored; 2 lines changed)
@@ -22,7 +22,7 @@ import (

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
-    bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
+    bootstrapapi "k8s.io/cluster-bootstrap/token/api"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/lifecycle"
 )

vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go (generated, vendored; 2 lines changed)
@@ -24,7 +24,7 @@ import (

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     clientset "k8s.io/client-go/kubernetes"
-    bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
+    bootstrapapi "k8s.io/cluster-bootstrap/token/api"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/lifecycle"
 )

vendor/k8s.io/kubernetes/test/e2e/lifecycle/bootstrap/util.go (generated, vendored; 2 lines changed)
@@ -27,7 +27,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
-    bootstrapapi "k8s.io/client-go/tools/bootstrap/token/api"
+    bootstrapapi "k8s.io/cluster-bootstrap/token/api"
     "k8s.io/kubernetes/test/e2e/framework"
 )


vendor/k8s.io/kubernetes/test/e2e/lifecycle/cluster_upgrade.go (generated, vendored; 49 lines changed)
@@ -18,6 +19,7 @@ package lifecycle

 import (
     "encoding/xml"
+    "flag"
     "fmt"
     "os"
     "path/filepath"
@@ -26,8 +27,8 @@ import (
     "sync"
     "time"

+    "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/client-go/discovery"
-    "k8s.io/kubernetes/pkg/util/version"
     "k8s.io/kubernetes/test/e2e/chaosmonkey"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
@@ -39,6 +40,11 @@ import (
     . "github.com/onsi/ginkgo"
 )

+var (
+    upgradeTarget = flag.String("upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
+    upgradeImage  = flag.String("upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
+)
+
 var upgradeTests = []upgrades.Test{
     &upgrades.ServiceUpgradeTest{},
     &upgrades.SecretUpgradeTest{},
@@ -52,6 +58,7 @@ var upgradeTests = []upgrades.Test{
     &apps.DaemonSetUpgradeTest{},
     &upgrades.IngressUpgradeTest{},
     &upgrades.AppArmorUpgradeTest{},
+    &storage.VolumeModeDowngradeTest{},
 }

 var gpuUpgradeTests = []upgrades.Test{
@@ -89,7 +96,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
     testFrameworks := createUpgradeFrameworks(upgradeTests)
     Describe("master upgrade", func() {
         It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "Master upgrade"}
@@ -112,7 +119,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {

     Describe("node upgrade", func() {
         It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "Node upgrade"}
@@ -125,7 +132,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
                 start := time.Now()
                 defer finalizeUpgradeTest(start, nodeUpgradeTest)
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.NodeUpgrade, upgradeFunc)
@@ -134,7 +141,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {

     Describe("cluster upgrade", func() {
         It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
@@ -146,7 +153,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -163,7 +170,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {

     Describe("cluster downgrade", func() {
         It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
@@ -175,7 +182,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
                 defer finalizeUpgradeTest(start, clusterDowngradeTest)
                 // Yes this really is a downgrade. And nodes must downgrade first.
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -268,7 +275,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     testFrameworks := createUpgradeFrameworks(gpuUpgradeTests)
     Describe("master upgrade", func() {
         It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "GPU master upgrade"}
@@ -286,7 +293,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     })
     Describe("cluster upgrade", func() {
         It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "GPU cluster upgrade"}
@@ -298,7 +305,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -306,7 +313,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
     })
     Describe("cluster downgrade", func() {
         It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "GPU cluster downgrade"}
@@ -316,7 +323,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
                 start := time.Now()
                 defer finalizeUpgradeTest(start, gpuDowngradeTest)
                 target := upgCtx.Versions[1].Version.String()
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -334,7 +341,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
     testFrameworks := createUpgradeFrameworks(statefulsetUpgradeTests)
     framework.KubeDescribe("stateful upgrade", func() {
         It("should maintain a functioning cluster", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "Stateful upgrade"}
@@ -346,7 +353,7 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgrade(target))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -365,7 +372,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
     testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)

         It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "kube-proxy upgrade"}
@@ -381,7 +388,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
                 target := upgCtx.Versions[1].Version.String()
                 framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
                 framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-                framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, true))
+                framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
                 framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             }
             runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -392,7 +399,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
     testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)

         It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
-            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
             framework.ExpectNoError(err)

             testSuite := &junit.TestSuite{Name: "kube-proxy downgrade"}
@@ -407,7 +414,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
             defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
             // Yes this really is a downgrade. And nodes must downgrade first.
             target := upgCtx.Versions[1].Version.String()
-            framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, false))
+            framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
             framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
             framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
             framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
@@ -496,7 +503,7 @@ func runUpgradeSuite(
     upgradeType upgrades.UpgradeType,
     upgradeFunc func(),
 ) {
-    upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+    upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
     framework.ExpectNoError(err)

     cm := chaosmonkey.New(upgradeFunc)
@@ -569,7 +576,7 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u

     upgCtx.Versions = append(upgCtx.Versions, upgrades.VersionContext{
         Version:   *nextVer,
-        NodeImage: framework.TestContext.UpgradeImage,
+        NodeImage: *upgradeImage,
     })

     return upgCtx, nil

vendor/k8s.io/kubernetes/test/e2e/lifecycle/kubelet_security.go (generated, vendored; new file, 85 lines)
@@ -0,0 +1,85 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lifecycle
+
+import (
+    "fmt"
+    "net"
+    "net/http"
+    "time"
+
+    "k8s.io/api/core/v1"
+    "k8s.io/kubernetes/pkg/master/ports"
+    "k8s.io/kubernetes/test/e2e/framework"
+
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
+)
+
+var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
+    f := framework.NewDefaultFramework("kubelet-security")
+
+    var node *v1.Node
+    var nodeName string
+
+    BeforeEach(func() {
+        nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+        Expect(len(nodes.Items)).NotTo(BeZero())
+        node = &nodes.Items[0]
+        nodeName = node.Name
+    })
+
+    // make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
+    It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
+        result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
+        Expect(err).NotTo(HaveOccurred())
+
+        var statusCode int
+        result.StatusCode(&statusCode)
+        Expect(statusCode).NotTo(Equal(http.StatusOK))
+    })
+    It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
+        result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
+        Expect(err).NotTo(HaveOccurred())
+
+        var statusCode int
+        result.StatusCode(&statusCode)
+        Expect(statusCode).NotTo(Equal(http.StatusOK))
+    })
+
+    // make sure kubelet readonly (10255) and cadvisor (4194) ports are closed on the public IP address
+    disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
+    for _, port := range disabledPorts {
+        It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() {
+            portClosedTest(f, node, port)
+        })
+    }
+})
+
+// checks whether the target port is closed
+func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
+    nodeAddrs := framework.GetNodeAddresses(pickNode, v1.NodeExternalIP)
+    Expect(len(nodeAddrs)).NotTo(BeZero())
+
+    for _, addr := range nodeAddrs {
+        conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
+        if err == nil {
+            conn.Close()
+            framework.Failf("port %d is not disabled", port)
+        }
+    }
+}

vendor/k8s.io/kubernetes/test/e2e/lifecycle/node_lease.go (generated, vendored; new file, 163 lines)
@@ -0,0 +1,163 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lifecycle
+
+import (
+    "fmt"
+    "strings"
+    "time"
+
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    clientset "k8s.io/client-go/kubernetes"
+    "k8s.io/kubernetes/test/e2e/framework"
+
+    . "github.com/onsi/ginkgo"
+    . "github.com/onsi/gomega"
+)
+
+var _ = SIGDescribe("[Feature:NodeLease][NodeAlphaFeature:NodeLease][Disruptive]", func() {
+    f := framework.NewDefaultFramework("node-lease-test")
+    var systemPodsNo int32
+    var c clientset.Interface
+    var ns string
+    var group string
+
+    BeforeEach(func() {
+        c = f.ClientSet
+        ns = f.Namespace.Name
+        systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
+        Expect(err).To(BeNil())
+        systemPodsNo = int32(len(systemPods))
+        if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
+            framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+        } else {
+            group = framework.TestContext.CloudConfig.NodeInstanceGroup
+        }
+    })
+
+    Describe("NodeLease deletion", func() {
+        var skipped bool
+
+        BeforeEach(func() {
+            skipped = true
+            framework.SkipUnlessProviderIs("gce", "gke", "aws")
+            framework.SkipUnlessNodeCountIsAtLeast(2)
+            skipped = false
+        })
+
+        AfterEach(func() {
+            if skipped {
+                return
+            }
+
+            By("restoring the original node instance group size")
+            if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
+                framework.Failf("Couldn't restore the original node instance group size: %v", err)
+            }
+            // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
+            // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
+            // Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
+            // right before a test that tries to get logs, for example, we may get unlucky and try to use a
+            // closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
+            // being closed, so we sleep.
+            //
+            // TODO(cjcullen) reduce this sleep (#19314)
+            if framework.ProviderIs("gke") {
+                By("waiting 5 minutes for all dead tunnels to be dropped")
+                time.Sleep(5 * time.Minute)
+            }
+            if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
+                framework.Failf("Couldn't restore the original node instance group size: %v", err)
+            }
+
+            if err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
+                framework.Failf("Couldn't restore the original cluster size: %v", err)
+            }
+            // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
+            // the cluster is restored to health.
+            By("waiting for system pods to successfully restart")
+            err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+            Expect(err).To(BeNil())
+        })
+
+        It("node lease should be deleted when corresponding node is deleted", func() {
+            leaseClient := c.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
+            err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
+            Expect(err).To(BeNil())
+
+            By("verify node lease exists for every nodes")
+            originalNodes := framework.GetReadySchedulableNodesOrDie(c)
+            Expect(len(originalNodes.Items)).To(Equal(framework.TestContext.CloudConfig.NumNodes))
+
+            Eventually(func() error {
+                pass := true
+                for _, node := range originalNodes.Items {
+                    if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
+                        framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+                        pass = false
+                    }
+                }
+                if pass {
+                    return nil
+                }
+                return fmt.Errorf("some node lease is not ready")
+            }, 1*time.Minute, 5*time.Second).Should(BeNil())
+
+            targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
+            By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
+            err = framework.ResizeGroup(group, targetNumNodes)
+            Expect(err).To(BeNil())
+            err = framework.WaitForGroupSize(group, targetNumNodes)
+            Expect(err).To(BeNil())
+            err = framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
+            Expect(err).To(BeNil())
+            targetNodes := framework.GetReadySchedulableNodesOrDie(c)
+            Expect(len(targetNodes.Items)).To(Equal(int(targetNumNodes)))
+
+            By("verify node lease is deleted for the deleted node")
+            var deletedNodeName string
+            for _, originalNode := range originalNodes.Items {
+                originalNodeName := originalNode.ObjectMeta.Name
+                for _, targetNode := range targetNodes.Items {
+                    if originalNodeName == targetNode.ObjectMeta.Name {
+                        continue
+                    }
+                }
+                deletedNodeName = originalNodeName
+                break
+            }
+            Expect(deletedNodeName).NotTo(Equal(""))
+            Eventually(func() error {
+                if _, err := leaseClient.Get(deletedNodeName, metav1.GetOptions{}); err == nil {
+                    return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName)
+                }
+                return nil
+            }, 1*time.Minute, 5*time.Second).Should(BeNil())
+
+            By("verify node leases still exist for remaining nodes")
+            Eventually(func() error {
+                for _, node := range targetNodes.Items {
+                    if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
+                        return err
+                    }
+                }
+                return nil
+            }, 1*time.Minute, 5*time.Second).Should(BeNil())
+        })
+    })
+})

vendor/k8s.io/kubernetes/test/e2e/lifecycle/reboot.go (generated, vendored; 4 lines changed)
@@ -98,13 +98,13 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
     It("each node by ordering unclean reboot and ensure they function upon restart", func() {
         // unclean shutdown and restart
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
-        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
+        testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
     })

     It("each node by triggering kernel panic and ensure they function upon restart", func() {
         // kernel panic
         // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
-        testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
+        testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
     })

     It("each node by switching off the network interface and ensure they function upon switch on", func() {

vendor/k8s.io/kubernetes/test/e2e/lifecycle/resize_nodes.go (generated, vendored; 7 lines changed)
@@ -45,13 +45,12 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
     var systemPodsNo int32
     var c clientset.Interface
     var ns string
-    ignoreLabels := framework.ImagePullerLabels
     var group string

     BeforeEach(func() {
         c = f.ClientSet
         ns = f.Namespace.Name
-        systemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)
+        systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
         Expect(err).NotTo(HaveOccurred())
         systemPodsNo = int32(len(systemPods))
         if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
@@ -104,10 +103,8 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
         // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
         // the cluster is restored to health.
         By("waiting for system pods to successfully restart")
-        err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)
+        err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
         Expect(err).NotTo(HaveOccurred())
-        By("waiting for image prepulling pods to complete")
-        framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
     })

     It("should be able to delete nodes", func() {