Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00
vendor update for CSI 0.3.0
vendor/k8s.io/kubernetes/test/e2e/lifecycle/cluster_upgrade.go (generated, vendored) | 70 lines changed
@@ -54,6 +54,10 @@ var upgradeTests = []upgrades.Test{
     &upgrades.AppArmorUpgradeTest{},
 }
 
+var gpuUpgradeTests = []upgrades.Test{
+    &upgrades.NvidiaGPUUpgradeTest{},
+}
+
 var statefulsetUpgradeTests = []upgrades.Test{
     &upgrades.MySqlUpgradeTest{},
     &upgrades.EtcdUpgradeTest{},
@@ -256,6 +260,72 @@ var _ = SIGDescribe("ingress Downgrade [Feature:IngressDowngrade]", func() {
     })
 })
 
+var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
+    f := framework.NewDefaultFramework("gpu-upgrade")
+
+    // Create the frameworks here because we can only create them
+    // in a "Describe".
+    testFrameworks := createUpgradeFrameworks(gpuUpgradeTests)
+    Describe("master upgrade", func() {
+        It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            framework.ExpectNoError(err)
+
+            testSuite := &junit.TestSuite{Name: "GPU master upgrade"}
+            gpuUpgradeTest := &junit.TestCase{Name: "[sig-node] gpu-master-upgrade", Classname: "upgrade_tests"}
+            testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest)
+            upgradeFunc := func() {
+                start := time.Now()
+                defer finalizeUpgradeTest(start, gpuUpgradeTest)
+                target := upgCtx.Versions[1].Version.String()
+                framework.ExpectNoError(framework.MasterUpgrade(target))
+                framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
+            }
+            runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.MasterUpgrade, upgradeFunc)
+        })
+    })
+    Describe("cluster upgrade", func() {
+        It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            framework.ExpectNoError(err)
+
+            testSuite := &junit.TestSuite{Name: "GPU cluster upgrade"}
+            gpuUpgradeTest := &junit.TestCase{Name: "[sig-node] gpu-cluster-upgrade", Classname: "upgrade_tests"}
+            testSuite.TestCases = append(testSuite.TestCases, gpuUpgradeTest)
+            upgradeFunc := func() {
+                start := time.Now()
+                defer finalizeUpgradeTest(start, gpuUpgradeTest)
+                target := upgCtx.Versions[1].Version.String()
+                framework.ExpectNoError(framework.MasterUpgrade(target))
+                framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
+            }
+            runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
+        })
+    })
+    Describe("cluster downgrade", func() {
+        It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
+            upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
+            framework.ExpectNoError(err)
+
+            testSuite := &junit.TestSuite{Name: "GPU cluster downgrade"}
+            gpuDowngradeTest := &junit.TestCase{Name: "[sig-node] gpu-cluster-downgrade", Classname: "upgrade_tests"}
+            testSuite.TestCases = append(testSuite.TestCases, gpuDowngradeTest)
+            upgradeFunc := func() {
+                start := time.Now()
+                defer finalizeUpgradeTest(start, gpuDowngradeTest)
+                target := upgCtx.Versions[1].Version.String()
+                framework.ExpectNoError(framework.NodeUpgrade(f, target, framework.TestContext.UpgradeImage))
+                framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
+                framework.ExpectNoError(framework.MasterUpgrade(target))
+                framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
+            }
+            runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
+        })
+    })
+})
+
 var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func() {
     f := framework.NewDefaultFramework("stateful-upgrade")
 
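Note on the added GPU suites above: each upgradeFunc captures a start time and defers finalizeUpgradeTest, so the junit.TestCase records its duration (and any failure) even if an upgrade step aborts early. A minimal, self-contained sketch of that defer-based timing pattern follows; TestCase and finalize here are simplified stand-ins for the vendored junit types and the finalizeUpgradeTest helper, not the real API.

package main

import (
	"fmt"
	"time"
)

// TestCase is a simplified stand-in for the junit.TestCase used above.
type TestCase struct {
	Name    string
	Seconds float64
}

// finalize mirrors the role of finalizeUpgradeTest: stamp the elapsed time
// onto the test case when the surrounding function returns.
func finalize(start time.Time, tc *TestCase) {
	tc.Seconds = time.Since(start).Seconds()
	fmt.Printf("%s finished in %.2fs\n", tc.Name, tc.Seconds)
}

func main() {
	tc := &TestCase{Name: "[sig-node] gpu-master-upgrade"}
	upgradeFunc := func() {
		start := time.Now()
		defer finalize(start, tc)         // runs even if the upgrade steps fail or panic
		time.Sleep(50 * time.Millisecond) // placeholder for the actual upgrade/check steps
	}
	upgradeFunc()
}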
vendor/k8s.io/kubernetes/test/e2e/lifecycle/ha_master.go (generated, vendored) | 4 lines changed
@@ -78,7 +78,7 @@ func createNewRC(c clientset.Interface, ns string, name string) {
 }
 
 func findRegionForZone(zone string) string {
-    region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").CombinedOutput()
+    region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output()
     framework.ExpectNoError(err)
     if string(region) == "" {
         framework.Failf("Region not found; zone: %s", zone)
@@ -88,7 +88,7 @@ func findRegionForZone(zone string) string {
 
 func findZonesForRegion(region string) []string {
     output, err := exec.Command("gcloud", "compute", "zones", "list", "--filter=region="+region,
-        "--quiet", "--format=csv[no-heading](name)").CombinedOutput()
+        "--quiet", "--format=csv[no-heading](name)").Output()
     framework.ExpectNoError(err)
     zones := strings.Split(string(output), "\n")
     return zones
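The two ha_master.go hunks swap CombinedOutput() for Output(): CombinedOutput interleaves stderr with stdout, so any gcloud warning would end up inside the CSV region/zone text being parsed, while Output returns stdout alone. A small runnable illustration, using a shell command as a stand-in for gcloud:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// An exec.Cmd cannot be reused after it runs, so build a fresh one per call.
	cmd := func() *exec.Cmd {
		return exec.Command("sh", "-c", `echo "us-central1"; echo "WARNING: noise" 1>&2`)
	}

	combined, _ := cmd().CombinedOutput() // stdout and stderr interleaved
	stdoutOnly, _ := cmd().Output()       // stdout only

	fmt.Printf("CombinedOutput: %q\n", combined)
	fmt.Printf("Output:         %q\n", stdoutOnly)
}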
vendor/k8s.io/kubernetes/test/e2e/lifecycle/reboot.go (generated, vendored) | 6 lines changed
@@ -219,7 +219,11 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
     // Setup
     ns := metav1.NamespaceSystem
-    ps := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
+    ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
+    if err != nil {
+        framework.Logf("Couldn't initialize pod store: %v", err)
+        return false
+    }
     defer ps.Stop()
 
     // Get the node initially.
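For context on the reboot.go change: testutils.NewPodStore now returns an error alongside the store, and the store it builds is scoped to kube-system pods on a single node via a field selector on api.PodHostField (the spec.nodeName field). A hedged sketch of the same selection done as a one-shot List, using the old no-context client-go signatures that match this vendored code; the real PodStore keeps a watch open rather than listing once.

package podsbynode

import (
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"
)

// systemPodsOnNode lists kube-system pods bound to nodeName, selected the same
// way the PodStore above filters them.
func systemPodsOnNode(c clientset.Interface, nodeName string) ([]v1.Pod, error) {
	opts := metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
	}
	podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(opts)
	if err != nil {
		return nil, err
	}
	return podList.Items, nil
}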
vendor/k8s.io/kubernetes/test/e2e/lifecycle/resize_nodes.go (generated, vendored) | 48 lines changed
@@ -30,8 +30,6 @@ import (
     . "github.com/onsi/gomega"
 )
 
-const resizeNodeReadyTimeout = 2 * time.Minute
-
 func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
     rc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
     if err != nil {
@@ -65,6 +63,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 
     // Slow issue #13323 (8 min)
     Describe("Resize [Slow]", func() {
+        var originalNodeCount int32
         var skipped bool
 
         BeforeEach(func() {
@@ -98,7 +97,8 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
            if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
                framework.Failf("Couldn't restore the original node instance group size: %v", err)
            }
-           if err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
+
+           if err := framework.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
                framework.Failf("Couldn't restore the original cluster size: %v", err)
            }
            // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
@@ -114,17 +114,20 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
            // Create a replication controller for a service that serves its hostname.
            // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
            name := "my-hostname-delete-node"
-           replicas := int32(framework.TestContext.CloudConfig.NumNodes)
-           common.NewRCByName(c, ns, name, replicas, nil)
-           err := framework.VerifyPods(c, ns, name, true, replicas)
+           numNodes, err := framework.NumberOfRegisteredNodes(c)
+           Expect(err).NotTo(HaveOccurred())
+           originalNodeCount = int32(numNodes)
+           common.NewRCByName(c, ns, name, originalNodeCount, nil)
+           err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
            Expect(err).NotTo(HaveOccurred())
 
-           By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
-           err = framework.ResizeGroup(group, replicas-1)
+           targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
+           By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
+           err = framework.ResizeGroup(group, targetNumNodes)
            Expect(err).NotTo(HaveOccurred())
-           err = framework.WaitForGroupSize(group, replicas-1)
+           err = framework.WaitForGroupSize(group, targetNumNodes)
            Expect(err).NotTo(HaveOccurred())
-           err = framework.WaitForReadyNodes(c, int(replicas-1), 10*time.Minute)
+           err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
            Expect(err).NotTo(HaveOccurred())
 
            By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
@@ -132,7 +135,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
            time.Sleep(time.Minute)
 
            By("verifying whether the pods from the removed node are recreated")
-           err = framework.VerifyPods(c, ns, name, true, replicas)
+           err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
            Expect(err).NotTo(HaveOccurred())
        })
 
@@ -142,23 +145,26 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
            // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
            name := "my-hostname-add-node"
            common.NewSVCByName(c, ns, name)
-           replicas := int32(framework.TestContext.CloudConfig.NumNodes)
-           common.NewRCByName(c, ns, name, replicas, nil)
-           err := framework.VerifyPods(c, ns, name, true, replicas)
+           numNodes, err := framework.NumberOfRegisteredNodes(c)
+           Expect(err).NotTo(HaveOccurred())
+           originalNodeCount = int32(numNodes)
+           common.NewRCByName(c, ns, name, originalNodeCount, nil)
+           err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
            Expect(err).NotTo(HaveOccurred())
 
-           By(fmt.Sprintf("increasing cluster size to %d", replicas+1))
-           err = framework.ResizeGroup(group, replicas+1)
+           targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
+           By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes))
+           err = framework.ResizeGroup(group, targetNumNodes)
            Expect(err).NotTo(HaveOccurred())
-           err = framework.WaitForGroupSize(group, replicas+1)
+           err = framework.WaitForGroupSize(group, targetNumNodes)
            Expect(err).NotTo(HaveOccurred())
-           err = framework.WaitForReadyNodes(c, int(replicas+1), 10*time.Minute)
+           err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
            Expect(err).NotTo(HaveOccurred())
 
-           By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))
-           err = resizeRC(c, ns, name, replicas+1)
+           By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
+           err = resizeRC(c, ns, name, originalNodeCount+1)
            Expect(err).NotTo(HaveOccurred())
-           err = framework.VerifyPods(c, ns, name, true, replicas+1)
+           err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)
            Expect(err).NotTo(HaveOccurred())
        })
    })
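The resize_nodes.go hunks stop deriving replica counts from the static framework.TestContext.CloudConfig.NumNodes and instead size them from framework.NumberOfRegisteredNodes at test time. A hedged sketch of what such a count amounts to, assuming the simplest reading (every node the API server knows about, ready or not); the real framework helper may filter differently, and the no-context List signature matches the old client-go vendored here.

package nodecount

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// numberOfRegisteredNodes counts all nodes currently registered with the API server.
func numberOfRegisteredNodes(c clientset.Interface) (int, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(nodes.Items), nil
}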
vendor/k8s.io/kubernetes/test/e2e/lifecycle/restart.go (generated, vendored) | 71 lines changed
@@ -26,6 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/wait"
     kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
+    "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
 
@@ -55,10 +56,18 @@ func filterIrrelevantPods(pods []*v1.Pod) []*v1.Pod {
     return results
 }
 
+func nodeNames(nodes []v1.Node) []string {
+    result := make([]string, 0, len(nodes))
+    for i := range nodes {
+        result = append(result, nodes[i].Name)
+    }
+    return result
+}
+
 var _ = SIGDescribe("Restart [Disruptive]", func() {
     f := framework.NewDefaultFramework("restart")
     var ps *testutils.PodStore
-    var originalNodeNames []string
+    var originalNodes []v1.Node
     var originalPodNames []string
     var numNodes int
     var systemNamespace string
@@ -67,15 +76,17 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
        // This test requires the ability to restart all nodes, so the provider
        // check must be identical to that call.
        framework.SkipUnlessProviderIs("gce", "gke")
-       ps = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
-       numNodes = framework.TestContext.CloudConfig.NumNodes
+       var err error
+       ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
+       Expect(err).NotTo(HaveOccurred())
+       numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
+       Expect(err).NotTo(HaveOccurred())
        systemNamespace = metav1.NamespaceSystem
 
        By("ensuring all nodes are ready")
-       var err error
-       originalNodeNames, err = framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, numNodes)
+       originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
        Expect(err).NotTo(HaveOccurred())
-       framework.Logf("Got the following nodes before restart: %v", originalNodeNames)
+       framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
 
        By("ensuring all pods are running and ready")
        allPods := ps.List()
@@ -99,20 +110,20 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 
    It("should restart all nodes and ensure all nodes and pods recover", func() {
        By("restarting all of the nodes")
-       err := restartNodes(f, originalNodeNames)
+       err := common.RestartNodes(f.ClientSet, originalNodes)
        Expect(err).NotTo(HaveOccurred())
 
        By("ensuring all nodes are ready after the restart")
-       nodeNamesAfter, err := framework.CheckNodesReady(f.ClientSet, framework.RestartNodeReadyAgainTimeout, numNodes)
+       nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
        Expect(err).NotTo(HaveOccurred())
-       framework.Logf("Got the following nodes after restart: %v", nodeNamesAfter)
+       framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
 
        // Make sure that we have the same number of nodes. We're not checking
        // that the names match because that's implementation specific.
        By("ensuring the same number of nodes exist after the restart")
-       if len(originalNodeNames) != len(nodeNamesAfter) {
+       if len(originalNodes) != len(nodesAfter) {
            framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
-               len(originalNodeNames), len(nodeNamesAfter))
+               len(originalNodes), len(nodesAfter))
        }
 
        // Make sure that we have the same number of pods. We're not checking
@@ -158,41 +169,3 @@ func waitForNPods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]
     }
     return podNames, nil
 }
-
-func restartNodes(f *framework.Framework, nodeNames []string) error {
-    // List old boot IDs.
-    oldBootIDs := make(map[string]string)
-    for _, name := range nodeNames {
-        node, err := f.ClientSet.CoreV1().Nodes().Get(name, metav1.GetOptions{})
-        if err != nil {
-            return fmt.Errorf("error getting node info before reboot: %s", err)
-        }
-        oldBootIDs[name] = node.Status.NodeInfo.BootID
-    }
-    // Reboot the nodes.
-    args := []string{
-        "compute",
-        fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-        "instances",
-        "reset",
-    }
-    args = append(args, nodeNames...)
-    args = append(args, fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone))
-    stdout, stderr, err := framework.RunCmd("gcloud", args...)
-    if err != nil {
-        return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
-    }
-    // Wait for their boot IDs to change.
-    for _, name := range nodeNames {
-        if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
-            node, err := f.ClientSet.CoreV1().Nodes().Get(name, metav1.GetOptions{})
-            if err != nil {
-                return false, fmt.Errorf("error getting node info after reboot: %s", err)
-            }
-            return node.Status.NodeInfo.BootID != oldBootIDs[name], nil
-        }); err != nil {
-            return fmt.Errorf("error waiting for node %s boot ID to change: %s", name, err)
-        }
-    }
-    return nil
-}
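The deleted restartNodes helper above verifies that a node really rebooted by waiting for Node.Status.NodeInfo.BootID to change, polling with wait.Poll after issuing `gcloud compute instances reset`; presumably its common.RestartNodes replacement keeps a similar check. A hedged sketch of just that boot-ID polling step, reusing the old clientset signatures shown in the removed code:

package bootid

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForBootIDChange polls until nodeName reports a kernel boot ID different
// from oldBootID, i.e. the machine has actually gone through a reboot.
func waitForBootIDChange(c clientset.Interface, nodeName, oldBootID string) error {
	return wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			// Treat API errors as fatal here; a softer variant could retry instead.
			return false, fmt.Errorf("error getting node %s after reboot: %v", nodeName, err)
		}
		return node.Status.NodeInfo.BootID != oldBootID, nil
	})
}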