vendor update for CSI 0.3.0

Author: gman
Date: 2018-07-18 16:47:22 +02:00
Parent: 6f484f92fc
Commit: 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions


@@ -16,6 +16,7 @@ go_library(
"ingress.go",
"kube_proxy_migration.go",
"mysql.go",
"nvidia-gpu.go",
"secrets.go",
"services.go",
"sysctl.go",
@@ -23,11 +24,11 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/upgrades",
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
@@ -38,6 +39,7 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",


@@ -19,7 +19,7 @@ package upgrades
import (
"fmt"
extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -54,11 +54,11 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
nginxImage := imageutils.GetE2EImage(imageutils.NginxSlim)
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
By(fmt.Sprintf("Creating a deployment %q with 1 replica in namespace %q", deploymentName, ns))
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, extensions.RollingUpdateDeploymentStrategyType)
d := framework.NewDeployment(deploymentName, int32(1), map[string]string{"test": "upgrade"}, "nginx", nginxImage, apps.RollingUpdateDeploymentStrategyType)
deployment, err := deploymentClient.Create(d)
framework.ExpectNoError(err)
@@ -81,7 +81,7 @@ func (t *DeploymentUpgradeTest) Setup(f *framework.Framework) {
// Trigger a new rollout so that we have some history.
By(fmt.Sprintf("Triggering a new rollout for deployment %q", deploymentName))
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *extensions.Deployment) {
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = "updated-name"
})
framework.ExpectNoError(err)
@@ -121,8 +121,8 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
c := f.ClientSet
ns := f.Namespace.Name
deploymentClient := c.ExtensionsV1beta1().Deployments(ns)
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
deploymentClient := c.AppsV1().Deployments(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
deployment, err := deploymentClient.Get(deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
@@ -157,7 +157,7 @@ func (t *DeploymentUpgradeTest) Test(f *framework.Framework, done <-chan struct{
// Verify the upgraded deployment is active by scaling up the deployment by 1
By(fmt.Sprintf("Scaling up replicaset of deployment %q by 1", deploymentName))
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *extensions.Deployment) {
_, err = framework.UpdateDeploymentWithRetries(c, ns, deploymentName, func(deployment *apps.Deployment) {
*deployment.Spec.Replicas = *deployment.Spec.Replicas + 1
})
framework.ExpectNoError(err)
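The change above is part of the workloads API move from extensions/v1beta1 to apps/v1: the typed clients, the rolling-update strategy constant, and the update-callback signatures all switch packages. A minimal sketch of the new client path, assuming a configured kubernetes.Interface (the function and names below are illustrative, not from this diff):

// Sketch: reading and scaling a Deployment via the apps/v1 typed client.
// "c" is any configured kubernetes.Interface; ns and name are illustrative.
package sketch

import (
	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func scaleUp(c kubernetes.Interface, ns, name string) (*apps.Deployment, error) {
	// AppsV1 replaces ExtensionsV1beta1 for Deployments and ReplicaSets.
	d, err := c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	*d.Spec.Replicas = *d.Spec.Replicas + 1 // same mutation the test performs under retries
	return c.AppsV1().Deployments(ns).Update(d)
}

The ReplicaSet test in the next file follows the same pattern.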


@@ -20,7 +20,7 @@ import (
"fmt"
"time"
extensions "k8s.io/api/extensions/v1beta1"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/e2e/framework"
@@ -54,7 +54,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
By(fmt.Sprintf("Creating replicaset %s in namespace %s", rsName, ns))
replicaSet := framework.NewReplicaSet(rsName, ns, 1, map[string]string{"test": "upgrade"}, "nginx", nginxImage)
rs, err := c.ExtensionsV1beta1().ReplicaSets(ns).Create(replicaSet)
rs, err := c.AppsV1().ReplicaSets(ns).Create(replicaSet)
framework.ExpectNoError(err)
By(fmt.Sprintf("Waiting for replicaset %s to have all of its replicas ready", rsName))
@@ -67,7 +67,7 @@ func (r *ReplicaSetUpgradeTest) Setup(f *framework.Framework) {
func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
c := f.ClientSet
ns := f.Namespace.Name
rsClient := c.ExtensionsV1beta1().ReplicaSets(ns)
rsClient := c.AppsV1().ReplicaSets(ns)
// Block until upgrade is done
By(fmt.Sprintf("Waiting for upgrade to finish before checking replicaset %s", rsName))
@@ -86,7 +86,7 @@ func (r *ReplicaSetUpgradeTest) Test(f *framework.Framework, done <-chan struct{
// Verify the upgraded RS is active by scaling up the RS to scaleNum and ensuring all pods are Ready
By(fmt.Sprintf("Scaling up replicaset %s to %d", rsName, scaleNum))
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *extensions.ReplicaSet) {
_, err = framework.UpdateReplicaSetWithRetries(c, ns, rsName, func(rs *apps.ReplicaSet) {
*rs.Spec.Replicas = scaleNum
})
framework.ExpectNoError(err)


@@ -48,7 +48,8 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
500, /* cpuLimit */
200, /* memLimit */
f.ClientSet,
f.InternalClientset)
f.InternalClientset,
f.ScalesGetter)
t.hpa = common.CreateCPUHorizontalPodAutoscaler(
t.rc,
20, /* targetCPUUtilizationPercent */
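The consumer helper above now also receives f.ScalesGetter, so replica counts can be driven through the polymorphic scale client rather than only the internal clientset. A small sketch of what a scale.ScalesGetter does, with an illustrative namespace, resource, and name:

// Sketch: reading a scale subresource through a scale.ScalesGetter
// (the interface behind f.ScalesGetter). All names are illustrative.
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/scale"
)

func currentReplicas(sg scale.ScalesGetter) (int32, error) {
	s, err := sg.Scales("default").Get(
		schema.GroupResource{Group: "", Resource: "replicationcontrollers"}, "my-rc")
	if err != nil {
		return 0, err
	}
	return s.Spec.Replicas, nil
}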


@@ -29,6 +29,7 @@ import (
compute "google.golang.org/api/compute/v1"
extensions "k8s.io/api/extensions/v1beta1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -45,6 +46,7 @@ type IngressUpgradeTest struct {
httpClient *http.Client
ip string
ipName string
skipSSLCheck bool
}
// GCPResourceStore keeps track of the GCP resources spun up by an ingress.
@@ -97,7 +99,7 @@ func (t *IngressUpgradeTest) Setup(f *framework.Framework) {
framework.IngressStaticIPKey: t.ipName,
framework.IngressAllowHTTPKey: "false",
}, map[string]string{})
t.jig.AddHTTPS("tls-secret", "ingress.test.com")
t.jig.SetHTTPS("tls-secret", "ingress.test.com")
By("waiting for Ingress to come up with ip: " + t.ip)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%v/%v", t.ip, path), host, framework.LoadBalancerPollTimeout, t.jig.PollInterval, t.httpClient, false))
@@ -145,6 +147,26 @@ func (t *IngressUpgradeTest) Teardown(f *framework.Framework) {
framework.ExpectNoError(t.gceController.CleanupGCEIngressController())
}
// Skip checks if the test or part of the test should be skipped.
func (t *IngressUpgradeTest) Skip(upgCtx UpgradeContext) bool {
sslNameChangeVersion, err := version.ParseGeneric("v1.10.0")
framework.ExpectNoError(err)
var hasVersionBelow, hasVersionAboveOrEqual bool
for _, v := range upgCtx.Versions {
if v.Version.LessThan(sslNameChangeVersion) {
hasVersionBelow = true
continue
}
hasVersionAboveOrEqual = true
}
// Skip the SSL certificate check if the k8s version changes between 1.10-
// and 1.10+, because the naming scheme has changed.
if hasVersionBelow && hasVersionAboveOrEqual {
t.skipSSLCheck = true
}
return false
}
func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}, testDuringDisruption bool) {
if testDuringDisruption {
By("continuously hitting the Ingress IP")
@@ -177,13 +199,22 @@ func (t *IngressUpgradeTest) verify(f *framework.Framework, done <-chan struct{}
postUpgradeResourceStore := &GCPResourceStore{}
t.populateGCPResourceStore(postUpgradeResourceStore)
// Ignore certain fields in compute.Firewall that we know will change
// due to the upgrade/downgrade.
// TODO(rramkumar): Remove this once glbc 0.9.8 is released.
t.resourceStore.Fw.Allowed = nil
t.resourceStore.Fw.SourceRanges = nil
postUpgradeResourceStore.Fw.Allowed = nil
postUpgradeResourceStore.Fw.SourceRanges = nil
// Stub out the number of instances, as that is outside the Ingress controller's control.
for _, ig := range t.resourceStore.IgList {
ig.Size = 0
}
for _, ig := range postUpgradeResourceStore.IgList {
ig.Size = 0
}
// Stub out compute.SslCertificates when we know it will change during an upgrade/downgrade.
if t.skipSSLCheck {
t.resourceStore.SslList = nil
postUpgradeResourceStore.SslList = nil
}
// TODO(rramkumar): Remove this when GLBC v1.2.0 is released.
t.resourceStore.BeList = nil
postUpgradeResourceStore.BeList = nil
framework.ExpectNoError(compareGCPResourceStores(t.resourceStore, postUpgradeResourceStore, func(v1 reflect.Value, v2 reflect.Value) error {
i1 := v1.Interface()
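The new Skip hook decides whether the SSL-certificate comparison is meaningful for a given upgrade path; the underlying work is plain version comparison via k8s.io/kubernetes/pkg/util/version. A small worked example of the boundary logic, with illustrative versions:

// Sketch: the v1.10.0 boundary test behind skipSSLCheck.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/version"
)

func main() {
	boundary := version.MustParseGeneric("v1.10.0")
	from := version.MustParseGeneric("v1.9.7") // below the boundary
	to := version.MustParseGeneric("v1.10.4")  // at or above it
	// An upgrade whose versions straddle v1.10.0 crosses the SSL-certificate
	// renaming, so verify() drops SslList from both resource stores.
	crosses := from.LessThan(boundary) && !to.LessThan(boundary)
	fmt.Println(crosses) // true
}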


@@ -0,0 +1,112 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package upgrades
import (
"regexp"
"time"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/scheduling"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// NvidiaGPUUpgradeTest tests that the gpu resource is available before and after
// a cluster upgrade.
type NvidiaGPUUpgradeTest struct {
}
func (NvidiaGPUUpgradeTest) Name() string { return "nvidia-gpu-upgrade [sig-node] [sig-scheduling]" }
// Setup creates a job requesting gpu.
func (t *NvidiaGPUUpgradeTest) Setup(f *framework.Framework) {
scheduling.SetupNVIDIAGPUNode(f, false)
By("Creating a job requesting gpu")
t.startJob(f)
}
// Test waits for the upgrade to complete, and then verifies that the
// cuda pod started by the gpu job can successfully finish.
func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
<-done
By("Verifying gpu job success")
t.verifyJobPodSuccess(f)
if upgrade == MasterUpgrade {
// MasterUpgrade should be totally hitless.
job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred())
Expect(job.Status.Failed).To(BeZero(), "Job pods failed during master upgrade: %v", job.Status.Failed)
}
}
// Teardown cleans up any remaining resources.
func (t *NvidiaGPUUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
}
// startJob creates a job that requests gpu and runs a simple cuda container.
func (t *NvidiaGPUUpgradeTest) startJob(f *framework.Framework) {
var activeSeconds int64 = 3600
// Specifies 100 completions to make sure the job's lifetime spans the upgrade.
testJob := framework.NewTestJob("succeed", "cuda-add", v1.RestartPolicyAlways, 1, 100, &activeSeconds, 6)
testJob.Spec.Template.Spec = v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Containers: []v1.Container{
{
Name: "vector-addition",
Image: imageutils.GetE2EImage(imageutils.CudaVectorAdd),
Command: []string{"/bin/sh", "-c", "./vectorAdd && sleep 60"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
framework.NVIDIAGPUResourceName: *resource.NewQuantity(1, resource.DecimalSI),
},
},
},
},
}
ns := f.Namespace.Name
_, err := framework.CreateJob(f.ClientSet, ns, testJob)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created job %v", testJob)
By("Waiting for gpu job pod start")
err = framework.WaitForAllJobPodsRunning(f.ClientSet, ns, testJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Done with gpu job pod start")
}
// verifyJobPodSuccess verifies that the started cuda pod successfully passes.
func (t *NvidiaGPUUpgradeTest) verifyJobPodSuccess(f *framework.Framework) {
// Wait for client pod to complete.
ns := f.Namespace.Name
err := framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, "cuda-add", 1)
Expect(err).NotTo(HaveOccurred())
pods, err := framework.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
Expect(err).NotTo(HaveOccurred())
createdPod := pods.Items[0].Name
framework.Logf("Created pod %v", createdPod)
f.PodClient().WaitForSuccess(createdPod, 5*time.Minute)
logs, err := framework.GetPodLogs(f.ClientSet, ns, createdPod, "vector-addition")
framework.ExpectNoError(err, "Should be able to get pod logs")
framework.Logf("Got pod logs: %v", logs)
regex := regexp.MustCompile("PASSED")
Expect(regex.MatchString(logs)).To(BeTrue())
}
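Two design points in this new test are worth noting: the pod template uses RestartPolicyOnFailure with 100 completions and an hour-long active deadline, so the job keeps running (and may restart pods) through node disruption; and only the MasterUpgrade case asserts job.Status.Failed is zero, since a master-only upgrade is expected to be hitless.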


@@ -62,11 +62,8 @@ func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
PVSource: *t.pvSource,
Prebind: nil,
}
pvcConfig := framework.PersistentVolumeClaimConfig{
Annotations: map[string]string{
v1.BetaStorageClassAnnotation: "",
},
}
emptyStorageClass := ""
pvcConfig := framework.PersistentVolumeClaimConfig{StorageClassName: &emptyStorageClass}
By("Creating the PV and PVC")
t.pv, t.pvc, err = framework.CreatePVPVC(f.ClientSet, pvConfig, pvcConfig, ns, true)
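The claim config drops the beta v1.BetaStorageClassAnnotation in favor of the PVC spec's StorageClassName field; an empty, non-nil class name still opts out of dynamic provisioning. A minimal sketch of the claim this produces, with illustrative metadata and sizing:

// Sketch: a PVC pinned to the empty storage class via the spec field
// rather than the deprecated beta annotation. Name and size are illustrative.
package sketch

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func examplePVC() *v1.PersistentVolumeClaim {
	emptyStorageClass := ""
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-upgrade-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &emptyStorageClass, // "" = no dynamic provisioning
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
			},
		},
	}
}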


@@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/test/e2e/framework"
@@ -123,9 +122,6 @@ func sysctlTestPod(name string, sysctls map[string]string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: map[string]string{
v1.SysctlsPodAnnotationKey: v1helper.PodAnnotationsFromSysctls(sysctlList),
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
@@ -136,6 +132,9 @@ func sysctlTestPod(name string, sysctls map[string]string) *v1.Pod {
},
},
RestartPolicy: v1.RestartPolicyNever,
SecurityContext: &v1.PodSecurityContext{
Sysctls: sysctlList,
},
},
}
}
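This last change mirrors the move of sysctls from the v1.SysctlsPodAnnotationKey annotation to a first-class field on the pod's SecurityContext, which is why the v1helper dependency disappears above. A short sketch of the new shape, with an illustrative sysctl name/value pair:

// Sketch: declaring sysctls on PodSecurityContext instead of via annotations.
// The sysctl name/value pair is illustrative.
package sketch

import "k8s.io/api/core/v1"

func withSysctls(pod *v1.Pod) {
	pod.Spec.SecurityContext = &v1.PodSecurityContext{
		Sysctls: []v1.Sysctl{
			{Name: "kernel.shm_rmid_forced", Value: "1"},
		},
	}
}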