Fresh dep ensure
(mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-14 18:53:35 +00:00)
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD (generated, vendored; 154 lines changed)
@@ -1,10 +1,6 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
@@ -12,14 +8,14 @@ go_library(
        "authorizer_util.go",
        "cleanup.go",
        "crd_util.go",
        "create.go",
        "deployment_util.go",
        "exec_util.go",
        "firewall_util.go",
        "flake_reporting_util.go",
        "framework.go",
        "get-kubemark-resource-usage.go",
        "google_compute.go",
        "gpu_util.go",
        "ingress_utils.go",
        "jobs_util.go",
        "kubelet_stats.go",
        "log_size_monitoring.go",
@@ -29,6 +25,7 @@ go_library(
        "perf_util.go",
        "pods.go",
        "profile_gatherer.go",
        "provider.go",
        "psp_util.go",
        "pv_util.go",
        "rc_util.go",
@@ -52,107 +49,101 @@ go_library(
        "//pkg/apis/extensions:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//pkg/client/conditions:go_default_library",
        "//pkg/cloudprovider:go_default_library",
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//pkg/controller:go_default_library",
        "//pkg/controller/deployment/util:go_default_library",
        "//pkg/controller/job:go_default_library",
        "//pkg/controller/nodelifecycle:go_default_library",
        "//pkg/controller/service:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/apis:go_default_library",
        "//pkg/kubelet/apis/kubeletconfig:go_default_library",
        "//pkg/kubelet/apis/config:go_default_library",
        "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
        "//pkg/kubelet/dockershim/metrics:go_default_library",
        "//pkg/kubelet/events:go_default_library",
        "//pkg/kubelet/metrics:go_default_library",
        "//pkg/kubelet/sysctl:go_default_library",
        "//pkg/kubelet/util/format:go_default_library",
        "//pkg/kubemark:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/cache:go_default_library",
        "//pkg/scheduler/metrics:go_default_library",
        "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
        "//pkg/ssh:go_default_library",
        "//pkg/util/file:go_default_library",
        "//pkg/util/system:go_default_library",
        "//pkg/util/taints:go_default_library",
        "//pkg/util/version:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/apps/v1beta2:go_default_library",
        "//staging/src/k8s.io/api/authorization/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/batch/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/version:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
        "//staging/src/k8s.io/client-go/discovery:go_default_library",
        "//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/client-go/restmapper:go_default_library",
        "//staging/src/k8s.io/client-go/scale:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//staging/src/k8s.io/client-go/tools/remotecommand:go_default_library",
        "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
        "//staging/src/k8s.io/client-go/util/retry:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
        "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//test/e2e/framework/ginkgowrapper:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/e2e/perftype:go_default_library",
        "//test/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/ginkgo/config:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/github.com/onsi/gomega/types:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
        "//vendor/github.com/prometheus/common/expfmt:go_default_library",
        "//vendor/github.com/prometheus/common/model:go_default_library",
        "//vendor/github.com/spf13/viper:go_default_library",
        "//vendor/golang.org/x/crypto/ssh:go_default_library",
        "//vendor/golang.org/x/net/websocket:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/google.golang.org/api/googleapi:go_default_library",
        "//vendor/k8s.io/api/apps/v1:go_default_library",
        "//vendor/k8s.io/api/apps/v1beta2:go_default_library",
        "//vendor/k8s.io/api/authorization/v1beta1:go_default_library",
        "//vendor/k8s.io/api/batch/v1:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
        "//vendor/k8s.io/api/policy/v1beta1:go_default_library",
        "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/restmapper:go_default_library",
        "//vendor/k8s.io/client-go/scale:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd/api:go_default_library",
        "//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
        "//vendor/k8s.io/client-go/util/retry:go_default_library",
        "//vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)
@@ -168,15 +159,18 @@ filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/e2e/framework/config:all-srcs",
        "//test/e2e/framework/ginkgowrapper:all-srcs",
        "//test/e2e/framework/ingress:all-srcs",
        "//test/e2e/framework/metrics:all-srcs",
        "//test/e2e/framework/podlogs:all-srcs",
        "//test/e2e/framework/providers/aws:all-srcs",
        "//test/e2e/framework/providers/azure:all-srcs",
        "//test/e2e/framework/providers/gce:all-srcs",
        "//test/e2e/framework/providers/kubemark:all-srcs",
        "//test/e2e/framework/testfiles:all-srcs",
        "//test/e2e/framework/timer:all-srcs",
        "//test/e2e/framework/viperconfig:all-srcs",
    ],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_test",
    srcs = ["firewall_util_test.go"],
    embed = [":go_default_library"],
)
vendor/k8s.io/kubernetes/test/e2e/framework/authorizer_util.go (generated, vendored; 10 lines changed)
@@ -17,7 +17,7 @@ limitations under the License.
package framework

import (
    "fmt"
    "k8s.io/klog"
    "sync"
    "time"

@@ -62,7 +62,7 @@ func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviews
        // GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
        // has adjusted as expected. In this case, simply wait one second and hope it's up to date
        if apierrors.IsNotFound(err) {
            fmt.Printf("SubjectAccessReview endpoint is missing\n")
            klog.Info("SubjectAccessReview endpoint is missing")
            time.Sleep(1 * time.Second)
            return true, nil
        }
@@ -94,7 +94,7 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st

    // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
    if err != nil {
        fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
        klog.Errorf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
    }
}

@@ -124,7 +124,7 @@ func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string

    // if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
    if err != nil {
        fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
        klog.Errorf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
    }
}

@@ -140,7 +140,7 @@ func IsRBACEnabled(f *Framework) bool {
        Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
        isRBACEnabled = false
    } else if crs == nil || len(crs.Items) == 0 {
        Logf("No ClusteRoles found; assuming RBAC is disabled.")
        Logf("No ClusterRoles found; assuming RBAC is disabled.")
        isRBACEnabled = false
    } else {
        Logf("Found ClusterRoles; assuming RBAC is enabled.")
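The substantive change in this file is the migration from ad-hoc fmt.Printf logging to k8s.io/klog (newly vendored in this commit), plus the "ClusteRoles" typo fix. A minimal sketch of the same pattern in isolation; the reportBindError helper and its arguments are illustrative, not part of the framework:

package main

import "k8s.io/klog"

// reportBindError mirrors the change above: errors go through klog so they
// honor severity and log redirection instead of writing straight to stdout.
func reportBindError(err error, clusterRole, ns string) {
    if err != nil {
        klog.Errorf("Error binding clusterrole/%s for %q: %v", clusterRole, ns, err)
    }
}

func main() {
    defer klog.Flush() // klog buffers output; flush before exit
    reportBindError(nil, "e2e-test-role", "default")
}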
vendor/k8s.io/kubernetes/test/e2e/framework/config/BUILD (generated, vendored; new file, 32 lines)
@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["config.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/config",
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = ["config_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/github.com/stretchr/testify/require:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
vendor/k8s.io/kubernetes/test/e2e/framework/config/config.go (generated, vendored; new file, 234 lines)
@@ -0,0 +1,234 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package config simplifies the declaration of configuration options.
// Right now the implementation maps them directly to command line
// flags. When combined with test/e2e/framework/viper in a test suite,
// those flags then can also be read from a config file.
//
// Instead of defining flags one-by-one, developers annotate a
// structure with tags and then call a single function. This is the
// same approach as in https://godoc.org/github.com/jessevdk/go-flags,
// but implemented so that a test suite can continue to use the normal
// "flag" package.
//
// For example, a file storage/csi.go might define:
//
//     var scaling struct {
//         NumNodes int `default:"1" description:"number of nodes to run on"`
//         Master string
//     }
//     _ = config.AddOptions(&scaling, "storage.csi.scaling")
//
// This defines the following command line flags:
//
//     -storage.csi.scaling.numNodes=<int> - number of nodes to run on (default: 1)
//     -storage.csi.scaling.master=<string>
//
// All fields in the structure must be exported and have one of the following
// types (same as in the `flag` package):
// - bool
// - time.Duration
// - float64
// - string
// - int
// - int64
// - uint
// - uint64
// - and/or nested or embedded structures containing those basic types.
//
// Each basic entry may have a tag with these optional keys:
//
//     usage:   additional explanation of the option
//     default: the default value, in the same format as it would
//              be given on the command line and true/false for
//              a boolean
//
// The names of the final configuration options are a combination of an
// optional common prefix for all options in the structure and the
// name of the fields, concatenated with a dot. To get names that are
// consistent with the command line flags defined by `ginkgo`, the
// initial character of each field name is converted to lower case.
//
// There is currently no support for aliases, so renaming the fields
// or the common prefix will be visible to users of the test suite and
// may break scripts which use the old names.
//
// The variable will be filled with the actual values by the test
// suite before running tests. Beware that the code which registers
// Ginkgo tests cannot use those config options, because registering
// tests and options both run before the E2E test suite handles
// parameters.
package config

import (
    "flag"
    "fmt"
    "reflect"
    "strconv"
    "time"
    "unicode"
    "unicode/utf8"
)

// CommandLine is the flag set that AddOptions adds to. Usually this
// is the same as the default in the flag package, but can also be
// something else (for example during testing).
var CommandLine = flag.CommandLine

// AddOptions analyzes the options value and creates the necessary
// flags to populate it.
//
// The prefix can be used to root the options deeper in the overall
// set of options, with a dot separating different levels.
//
// The function always returns true, to enable this simplified
// registration of options:
//     _ = AddOptions(...)
//
// It panics when it encounters an error, like unsupported types
// or option name conflicts.
func AddOptions(options interface{}, prefix string) bool {
    optionsType := reflect.TypeOf(options)
    if optionsType == nil {
        panic("options parameter without a type - nil?!")
    }
    if optionsType.Kind() != reflect.Ptr || optionsType.Elem().Kind() != reflect.Struct {
        panic(fmt.Sprintf("need a pointer to a struct, got instead: %T", options))
    }
    addStructFields(optionsType.Elem(), reflect.Indirect(reflect.ValueOf(options)), prefix)
    return true
}

func addStructFields(structType reflect.Type, structValue reflect.Value, prefix string) {
    for i := 0; i < structValue.NumField(); i++ {
        entry := structValue.Field(i)
        addr := entry.Addr()
        structField := structType.Field(i)
        name := structField.Name
        r, n := utf8.DecodeRuneInString(name)
        name = string(unicode.ToLower(r)) + name[n:]
        usage := structField.Tag.Get("usage")
        def := structField.Tag.Get("default")
        if prefix != "" {
            name = prefix + "." + name
        }
        if structField.PkgPath != "" {
            panic(fmt.Sprintf("struct entry %q not exported", name))
        }
        ptr := addr.Interface()
        if structField.Anonymous {
            // Entries in embedded fields are treated like
            // entries in the struct itself, i.e. we add
            // them with the same prefix.
            addStructFields(structField.Type, entry, prefix)
            continue
        }
        if structField.Type.Kind() == reflect.Struct {
            // Add nested options.
            addStructFields(structField.Type, entry, name)
            continue
        }
        // We could switch based on structField.Type. Doing a
        // switch after getting an interface holding the
        // pointer to the entry has the advantage that we
        // immediately have something that we can add as flag
        // variable.
        //
        // Perhaps generics will make this entire switch redundant someday...
        switch ptr := ptr.(type) {
        case *bool:
            var defValue bool
            parseDefault(&defValue, name, def)
            CommandLine.BoolVar(ptr, name, defValue, usage)
        case *time.Duration:
            var defValue time.Duration
            parseDefault(&defValue, name, def)
            CommandLine.DurationVar(ptr, name, defValue, usage)
        case *float64:
            var defValue float64
            parseDefault(&defValue, name, def)
            CommandLine.Float64Var(ptr, name, defValue, usage)
        case *string:
            CommandLine.StringVar(ptr, name, def, usage)
        case *int:
            var defValue int
            parseDefault(&defValue, name, def)
            CommandLine.IntVar(ptr, name, defValue, usage)
        case *int64:
            var defValue int64
            parseDefault(&defValue, name, def)
            CommandLine.Int64Var(ptr, name, defValue, usage)
        case *uint:
            var defValue uint
            parseDefault(&defValue, name, def)
            CommandLine.UintVar(ptr, name, defValue, usage)
        case *uint64:
            var defValue uint64
            parseDefault(&defValue, name, def)
            CommandLine.Uint64Var(ptr, name, defValue, usage)
        default:
            panic(fmt.Sprintf("unsupported struct entry type %q: %T", name, entry.Interface()))
        }
    }
}

// parseDefault is necessary because "flag" wants the default in the
// actual type and cannot take a string. It would be nice to reuse the
// existing code for parsing from the "flag" package, but it isn't
// exported.
func parseDefault(value interface{}, name, def string) {
    if def == "" {
        return
    }
    checkErr := func(err error, value interface{}) {
        if err != nil {
            panic(fmt.Sprintf("invalid default %q for %T entry %s: %s", def, value, name, err))
        }
    }
    switch value := value.(type) {
    case *bool:
        v, err := strconv.ParseBool(def)
        checkErr(err, *value)
        *value = v
    case *time.Duration:
        v, err := time.ParseDuration(def)
        checkErr(err, *value)
        *value = v
    case *float64:
        v, err := strconv.ParseFloat(def, 64)
        checkErr(err, *value)
        *value = v
    case *int:
        v, err := strconv.Atoi(def)
        checkErr(err, *value)
        *value = v
    case *int64:
        v, err := strconv.ParseInt(def, 0, 64)
        checkErr(err, *value)
        *value = v
    case *uint:
        v, err := strconv.ParseUint(def, 0, strconv.IntSize)
        checkErr(err, *value)
        *value = uint(v)
    case *uint64:
        v, err := strconv.ParseUint(def, 0, 64)
        checkErr(err, *value)
        *value = v
    default:
        panic(fmt.Sprintf("%q: setting defaults not supported for type %T", name, value))
    }
}
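Putting the pieces together, a suite declares an options struct once and lets flag parsing fill it in. A minimal sketch, assuming only what the package documentation above states; the struct, prefix, and field names are illustrative:

package main

import (
    "flag"
    "fmt"
    "time"

    "k8s.io/kubernetes/test/e2e/framework/config"
)

// Illustrative options; the tags follow the scheme documented above.
var scaling = struct {
    NumNodes int           `default:"1" usage:"number of nodes to run on"`
    Timeout  time.Duration `default:"5m" usage:"per-test timeout"`
}{}

// Registration runs at package init time, before flag.Parse, which is
// exactly the window in which AddOptions is designed to be called.
var _ = config.AddOptions(&scaling, "storage.csi.scaling")

func main() {
    // Fills scaling from -storage.csi.scaling.numNodes / .timeout,
    // because config.CommandLine defaults to flag.CommandLine.
    flag.Parse()
    fmt.Println(scaling.NumNodes, scaling.Timeout)
}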
vendor/k8s.io/kubernetes/test/e2e/framework/config/config_test.go (generated, vendored; new file, 258 lines)
@@ -0,0 +1,258 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
    "flag"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestInt(t *testing.T) {
    CommandLine = flag.NewFlagSet("test", 0)
    var context struct {
        Number int `default:"5" usage:"some number"`
    }
    require.NotPanics(t, func() {
        AddOptions(&context, "")
    })
    require.Equal(t, []simpleFlag{
        {
            name: "number",
            usage: "some number",
            defValue: "5",
        }},
        allFlags(CommandLine))
    assert.Equal(t, 5, context.Number)
}

func TestLower(t *testing.T) {
    CommandLine = flag.NewFlagSet("test", 0)
    var context struct {
        Ähem string
        MixedCase string
    }
    require.NotPanics(t, func() {
        AddOptions(&context, "")
    })
    require.Equal(t, []simpleFlag{
        {
            name: "mixedCase",
        },
        {
            name: "ähem",
        },
    },
        allFlags(CommandLine))
}

func TestPrefix(t *testing.T) {
    CommandLine = flag.NewFlagSet("test", 0)
    var context struct {
        Number int `usage:"some number"`
    }
    require.NotPanics(t, func() {
        AddOptions(&context, "some.prefix")
    })
    require.Equal(t, []simpleFlag{
        {
            name: "some.prefix.number",
            usage: "some number",
            defValue: "0",
        }},
        allFlags(CommandLine))
}

func TestRecursion(t *testing.T) {
    CommandLine = flag.NewFlagSet("test", 0)
    type Nested struct {
        Number1 int `usage:"embedded number"`
    }
    var context struct {
        Nested
        A struct {
            B struct {
                C struct {
                    Number2 int `usage:"some number"`
                }
            }
        }
    }
    require.NotPanics(t, func() {
        AddOptions(&context, "")
    })
    require.Equal(t, []simpleFlag{
        {
            name: "a.b.c.number2",
            usage: "some number",
            defValue: "0",
        },
        {
            name: "number1",
            usage: "embedded number",
            defValue: "0",
        },
    },
        allFlags(CommandLine))
}

func TestPanics(t *testing.T) {
    assert.PanicsWithValue(t, `invalid default "a" for int entry prefix.number: strconv.Atoi: parsing "a": invalid syntax`, func() {
        var context struct {
            Number int `default:"a"`
        }
        AddOptions(&context, "prefix")
    })

    assert.PanicsWithValue(t, `invalid default "10000000000000000000" for int entry prefix.number: strconv.Atoi: parsing "10000000000000000000": value out of range`, func() {
        var context struct {
            Number int `default:"10000000000000000000"`
        }
        AddOptions(&context, "prefix")
    })

    assert.PanicsWithValue(t, `options parameter without a type - nil?!`, func() {
        AddOptions(nil, "")
    })

    assert.PanicsWithValue(t, `need a pointer to a struct, got instead: *int`, func() {
        number := 0
        AddOptions(&number, "")
    })

    assert.PanicsWithValue(t, `struct entry "prefix.number" not exported`, func() {
        var context struct {
            number int
        }
        AddOptions(&context, "prefix")
    })

    assert.PanicsWithValue(t, `unsupported struct entry type "prefix.someNumber": config.MyInt`, func() {
        type MyInt int
        var context struct {
            SomeNumber MyInt
        }
        AddOptions(&context, "prefix")
    })
}

func TestTypes(t *testing.T) {
    CommandLine = flag.NewFlagSet("test", 0)
    type Context struct {
        Bool bool `default:"true"`
        Duration time.Duration `default:"1ms"`
        Float64 float64 `default:"1.23456789"`
        String string `default:"hello world"`
        Int int `default:"-1" usage:"some number"`
        Int64 int64 `default:"-1234567890123456789"`
        Uint uint `default:"1"`
        Uint64 uint64 `default:"1234567890123456789"`
    }
    var context Context
    require.NotPanics(t, func() {
        AddOptions(&context, "")
    })
    require.Equal(t, []simpleFlag{
        {
            name: "bool",
            defValue: "true",
            isBool: true,
        },
        {
            name: "duration",
            defValue: "1ms",
        },
        {
            name: "float64",
            defValue: "1.23456789",
        },
        {
            name: "int",
            usage: "some number",
            defValue: "-1",
        },
        {
            name: "int64",
            defValue: "-1234567890123456789",
        },
        {
            name: "string",
            defValue: "hello world",
        },
        {
            name: "uint",
            defValue: "1",
        },
        {
            name: "uint64",
            defValue: "1234567890123456789",
        },
    },
        allFlags(CommandLine))
    assert.Equal(t,
        Context{true, time.Millisecond, 1.23456789, "hello world",
            -1, -1234567890123456789, 1, 1234567890123456789,
        },
        context,
        "default values must match")
    require.NoError(t, CommandLine.Parse([]string{
        "-int", "-2",
        "-int64", "-9123456789012345678",
        "-uint", "2",
        "-uint64", "9123456789012345678",
        "-string", "pong",
        "-float64", "-1.23456789",
        "-bool=false",
        "-duration=1s",
    }))
    assert.Equal(t,
        Context{false, time.Second, -1.23456789, "pong",
            -2, -9123456789012345678, 2, 9123456789012345678,
        },
        context,
        "parsed values must match")
}

func allFlags(fs *flag.FlagSet) []simpleFlag {
    var flags []simpleFlag
    fs.VisitAll(func(f *flag.Flag) {
        s := simpleFlag{
            name: f.Name,
            usage: f.Usage,
            defValue: f.DefValue,
        }
        type boolFlag interface {
            flag.Value
            IsBoolFlag() bool
        }
        if fv, ok := f.Value.(boolFlag); ok && fv.IsBoolFlag() {
            s.isBool = true
        }
        flags = append(flags, s)
    })
    return flags
}

type simpleFlag struct {
    name string
    usage string
    defValue string
    isBool bool
}
vendor/k8s.io/kubernetes/test/e2e/framework/crd_util.go (generated, vendored; 71 lines changed)
@@ -21,7 +21,7 @@ import (

    apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    crdclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    "k8s.io/apiextensions-apiserver/test/integration/testserver"
    "k8s.io/apiextensions-apiserver/test/integration/fixtures"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
@@ -35,25 +35,23 @@ type TestCrd struct {
    Name string
    Kind string
    ApiGroup string
    ApiVersion string
    Versions []apiextensionsv1beta1.CustomResourceDefinitionVersion
    ApiExtensionClient *crdclientset.Clientset
    Crd *apiextensionsv1beta1.CustomResourceDefinition
    DynamicClient dynamic.ResourceInterface
    DynamicClients map[string]dynamic.ResourceInterface
    CleanUp CleanCrdFn
}

// CreateTestCRD creates a new CRD specifically for the calling test.
func CreateTestCRD(f *Framework) (*TestCrd, error) {
func CreateMultiVersionTestCRD(f *Framework, group string, apiVersions []apiextensionsv1beta1.CustomResourceDefinitionVersion, conversionWebhook *apiextensionsv1beta1.WebhookClientConfig) (*TestCrd, error) {
    suffix := randomSuffix()
    name := fmt.Sprintf("e2e-test-%s-%s-crd", f.BaseName, suffix)
    kind := fmt.Sprintf("E2e-test-%s-%s-crd", f.BaseName, suffix)
    group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
    apiVersion := "v1"
    testcrd := &TestCrd{
        Name: name,
        Kind: kind,
        ApiGroup: group,
        ApiVersion: apiVersion,
        Name: name,
        Kind: kind,
        ApiGroup: group,
        Versions: apiVersions,
    }

    // Creating a custom resource definition for use by assorted tests.
@@ -75,21 +73,33 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {

    crd := newCRDForTest(testcrd)

    if conversionWebhook != nil {
        crd.Spec.Conversion = &apiextensionsv1beta1.CustomResourceConversion{
            Strategy: "Webhook",
            WebhookClientConfig: conversionWebhook,
        }
    }

    //create CRD and waits for the resource to be recognized and available.
    crd, err = testserver.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
    crd, err = fixtures.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
    if err != nil {
        Failf("failed to create CustomResourceDefinition: %v", err)
        return nil, err
    }

    gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Version, Resource: crd.Spec.Names.Plural}
    resourceClient := dynamicClient.Resource(gvr).Namespace(f.Namespace.Name)
    resourceClients := map[string]dynamic.ResourceInterface{}
    for _, v := range crd.Spec.Versions {
        if v.Served {
            gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: v.Name, Resource: crd.Spec.Names.Plural}
            resourceClients[v.Name] = dynamicClient.Resource(gvr).Namespace(f.Namespace.Name)
        }
    }

    testcrd.ApiExtensionClient = apiExtensionClient
    testcrd.Crd = crd
    testcrd.DynamicClient = resourceClient
    testcrd.DynamicClients = resourceClients
    testcrd.CleanUp = func() error {
        err := testserver.DeleteCustomResourceDefinition(crd, apiExtensionClient)
        err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient)
        if err != nil {
            Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
        }
@@ -98,13 +108,26 @@ func CreateTestCRD(f *Framework) (*TestCrd, error) {
    return testcrd, nil
}

// CreateTestCRD creates a new CRD specifically for the calling test.
func CreateTestCRD(f *Framework) (*TestCrd, error) {
    group := fmt.Sprintf("%s-crd-test.k8s.io", f.BaseName)
    apiVersions := []apiextensionsv1beta1.CustomResourceDefinitionVersion{
        {
            Name: "v1",
            Served: true,
            Storage: true,
        },
    }
    return CreateMultiVersionTestCRD(f, group, apiVersions, nil)
}

// newCRDForTest generates a CRD definition for the test
func newCRDForTest(testcrd *TestCrd) *apiextensionsv1beta1.CustomResourceDefinition {
    return &apiextensionsv1beta1.CustomResourceDefinition{
        ObjectMeta: metav1.ObjectMeta{Name: testcrd.GetMetaName()},
        Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
            Group: testcrd.ApiGroup,
            Version: testcrd.ApiVersion,
            Group: testcrd.ApiGroup,
            Versions: testcrd.Versions,
            Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
                Plural: testcrd.GetPluralName(),
                Singular: testcrd.Name,
@@ -130,3 +153,17 @@ func (c *TestCrd) GetPluralName() string {
func (c *TestCrd) GetListName() string {
    return c.Name + "List"
}

func (c *TestCrd) GetAPIVersions() []string {
    ret := []string{}
    for _, v := range c.Versions {
        if v.Served {
            ret = append(ret, v.Name)
        }
    }
    return ret
}

func (c *TestCrd) GetV1DynamicClient() dynamic.ResourceInterface {
    return c.DynamicClients["v1"]
}
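The net effect of this change is that a test can register a CRD with several served versions and gets one dynamic client per version. A hedged sketch of how a caller might use the new surface; the group and version names are illustrative, not taken from any real suite:

package mytests

import (
    apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
    "k8s.io/kubernetes/test/e2e/framework"
)

// exerciseMultiVersionCRD shows the intended call pattern against the
// new CreateMultiVersionTestCRD API introduced in this hunk.
func exerciseMultiVersionCRD(f *framework.Framework) error {
    versions := []apiextensionsv1beta1.CustomResourceDefinitionVersion{
        {Name: "v1", Served: true, Storage: true},
        {Name: "v2", Served: true, Storage: false},
    }
    testcrd, err := framework.CreateMultiVersionTestCRD(f, "stable.example.com", versions, nil)
    if err != nil {
        return err
    }
    defer testcrd.CleanUp()

    // One dynamic client per served version, keyed by version name.
    for _, v := range testcrd.GetAPIVersions() {
        client := testcrd.DynamicClients[v]
        _ = client // create/list custom resources via the dynamic client
    }
    return nil
}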
vendor/k8s.io/kubernetes/test/e2e/framework/create.go (generated, vendored; new file, 603 lines)
@@ -0,0 +1,603 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "bytes"
    "encoding/json"
    "fmt"

    "github.com/pkg/errors"

    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    storage "k8s.io/api/storage/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// LoadFromManifests loads .yaml or .json manifest files and returns
// all items that it finds in them. It supports all items for which
// there is a factory registered in Factories and .yaml files with
// multiple items separated by "---". Files are accessed via the
// "testfiles" package, which means they can come from a file system
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
//   https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
//   and silently ignored
// - the latest stable API version for each item is used, regardless of what
//   is specified in the manifest files
func (f *Framework) LoadFromManifests(files ...string) ([]interface{}, error) {
    var items []interface{}
    err := visitManifests(func(data []byte) error {
        // Ignore any additional fields for now, just determine what we have.
        var what What
        if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, &what); err != nil {
            return errors.Wrap(err, "decode TypeMeta")
        }

        factory := Factories[what]
        if factory == nil {
            return errors.Errorf("item of type %+v not supported", what)
        }

        object := factory.New()
        if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, object); err != nil {
            return errors.Wrapf(err, "decode %+v", what)
        }
        items = append(items, object)
        return nil
    }, files...)

    return items, err
}

func visitManifests(cb func([]byte) error, files ...string) error {
    for _, fileName := range files {
        data, err := testfiles.Read(fileName)
        if err != nil {
            Failf("reading manifest file: %v", err)
        }

        // Split at the "---" separator before working on
        // individual item. Only works for .yaml.
        //
        // We need to split ourselves because we need access
        // to each original chunk of data for
        // runtime.DecodeInto. kubectl has its own
        // infrastructure for this, but that is a lot of code
        // with many dependencies.
        items := bytes.Split(data, []byte("\n---"))

        for _, item := range items {
            if err := cb(item); err != nil {
                return errors.Wrap(err, fileName)
            }
        }
    }
    return nil
}

// PatchItems modifies the given items in place such that each test
// gets its own instances, to avoid conflicts between different tests
// and between tests and normal deployments.
//
// This is done by:
// - creating namespaced items inside the test's namespace
// - changing the name of non-namespaced items like ClusterRole
//
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func (f *Framework) PatchItems(items ...interface{}) error {
    for _, item := range items {
        // Uncomment when debugging the loading and patching of items.
        // Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
        if err := f.patchItemRecursively(item); err != nil {
            return err
        }
    }
    return nil
}

// CreateItems creates the items. Each of them must be an API object
// of a type that is registered in Factory.
//
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
//   usually in an AfterEach
// - the test suite terminates, potentially after
//   skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// PatchItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
    var destructors []func() error
    var cleanupHandle CleanupActionHandle
    cleanup := func() {
        if cleanupHandle == nil {
            // Already done.
            return
        }
        RemoveCleanupAction(cleanupHandle)

        // TODO (?): use same logic as framework.go for determining
        // whether we are expected to clean up? This would change the
        // meaning of the -delete-namespace and -delete-namespace-on-failure
        // command line flags, because they would also start to apply
        // to non-namespaced items.
        for _, destructor := range destructors {
            if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
                Logf("deleting failed: %s", err)
            }
        }
    }
    cleanupHandle = AddCleanupAction(cleanup)

    var result error
    for _, item := range items {
        // Each factory knows which item(s) it supports, so try each one.
        done := false
        description := DescribeItem(item)
        // Uncomment this line to get a full dump of the entire item.
        // description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
        Logf("creating %s", description)
        for _, factory := range Factories {
            destructor, err := factory.Create(f, item)
            if destructor != nil {
                destructors = append(destructors, func() error {
                    Logf("deleting %s", description)
                    return destructor()
                })
            }
            if err == nil {
                done = true
                break
            } else if errors.Cause(err) != ItemNotSupported {
                result = err
                break
            }
        }
        if result == nil && !done {
            result = errors.Errorf("item of type %T not supported", item)
            break
        }
    }

    if result != nil {
        cleanup()
        return nil, result
    }

    return cleanup, nil
}

// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func (f *Framework) CreateFromManifests(patch func(item interface{}) error, files ...string) (func(), error) {
    items, err := f.LoadFromManifests(files...)
    if err != nil {
        return nil, errors.Wrap(err, "CreateFromManifests")
    }
    if err := f.PatchItems(items...); err != nil {
        return nil, err
    }
    if patch != nil {
        for _, item := range items {
            if err := patch(item); err != nil {
                return nil, err
            }
        }
    }
    return f.CreateItems(items...)
}

// What is a subset of metav1.TypeMeta which (in contrast to
// metav1.TypeMeta itself) satisfies the runtime.Object interface.
type What struct {
    Kind string `json:"kind"`
}

func (in *What) DeepCopy() *What {
    return &What{Kind: in.Kind}
}

func (in *What) DeepCopyInto(out *What) {
    out.Kind = in.Kind
}

func (in *What) DeepCopyObject() runtime.Object {
    return &What{Kind: in.Kind}
}

func (in *What) GetObjectKind() schema.ObjectKind {
    return nil
}

// ItemFactory provides support for creating one particular item.
// The type gets exported because other packages might want to
// extend the set of pre-defined factories.
type ItemFactory interface {
    // New returns a new empty item.
    New() runtime.Object

    // Create is responsible for creating the item. It returns an
    // error or a cleanup function for the created item.
    // If the item is of an unsupported type, it must return
    // an error that has ItemNotSupported as cause.
    Create(f *Framework, item interface{}) (func() error, error)
}

// DescribeItem always returns a string that describes the item,
// usually by calling out to cache.MetaNamespaceKeyFunc which
// concatenates namespace (if set) and name. If that fails, the entire
// item gets converted to a string.
func DescribeItem(item interface{}) string {
    key, err := cache.MetaNamespaceKeyFunc(item)
    if err == nil && key != "" {
        return fmt.Sprintf("%T: %s", item, key)
    }
    return fmt.Sprintf("%T: %s", item, item)
}

// ItemNotSupported is the error that Create methods
// must return or wrap when they don't support the given item.
var ItemNotSupported = errors.New("not supported")

var Factories = map[What]ItemFactory{
    {"ClusterRole"}: &clusterRoleFactory{},
    {"ClusterRoleBinding"}: &clusterRoleBindingFactory{},
    {"DaemonSet"}: &daemonSetFactory{},
    {"Role"}: &roleFactory{},
    {"RoleBinding"}: &roleBindingFactory{},
    {"Secret"}: &secretFactory{},
    {"Service"}: &serviceFactory{},
    {"ServiceAccount"}: &serviceAccountFactory{},
    {"StatefulSet"}: &statefulSetFactory{},
    {"StorageClass"}: &storageClassFactory{},
}

// PatchName makes the name of some item unique by appending the
// generated unique name.
func (f *Framework) PatchName(item *string) {
    if *item != "" {
        *item = *item + "-" + f.UniqueName
    }
}

// PatchNamespace moves the item into the test's namespace. Not
// all items can be namespaced. For those, the name also needs to be
// patched.
func (f *Framework) PatchNamespace(item *string) {
    if f.Namespace != nil {
        *item = f.Namespace.GetName()
    }
}

func (f *Framework) patchItemRecursively(item interface{}) error {
    switch item := item.(type) {
    case *rbac.Subject:
        f.PatchNamespace(&item.Namespace)
    case *rbac.RoleRef:
        // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
        // which contains all role names that are defined cluster-wide before the test starts?
        // All those names are exempt from renaming. That list could be populated by querying
        // and get extended by tests.
        if item.Name != "e2e-test-privileged-psp" {
            f.PatchName(&item.Name)
        }
    case *rbac.ClusterRole:
        f.PatchName(&item.Name)
    case *rbac.Role:
        f.PatchNamespace(&item.Namespace)
        // Roles are namespaced, but because for RoleRef above we don't
        // know whether the referenced role is a ClusterRole or Role
        // and therefore always renames, we have to do the same here.
        f.PatchName(&item.Name)
    case *storage.StorageClass:
        f.PatchName(&item.Name)
    case *v1.ServiceAccount:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *v1.Secret:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *rbac.ClusterRoleBinding:
        f.PatchName(&item.Name)
        for i := range item.Subjects {
            if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
                return errors.Wrapf(err, "%T", f)
            }
        }
        if err := f.patchItemRecursively(&item.RoleRef); err != nil {
            return errors.Wrapf(err, "%T", f)
        }
    case *rbac.RoleBinding:
        f.PatchNamespace(&item.Namespace)
        for i := range item.Subjects {
            if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
                return errors.Wrapf(err, "%T", f)
            }
        }
        if err := f.patchItemRecursively(&item.RoleRef); err != nil {
            return errors.Wrapf(err, "%T", f)
        }
    case *v1.Service:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *apps.StatefulSet:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *apps.DaemonSet:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    default:
        return errors.Errorf("missing support for patching item of type %T", item)
    }
    return nil
}

// The individual factories all follow the same template, but with
// enough differences in types and functions that copy-and-paste
// looked like the least dirty approach. Perhaps one day Go will have
// generics.

type serviceAccountFactory struct{}

func (f *serviceAccountFactory) New() runtime.Object {
    return &v1.ServiceAccount{}
}

func (*serviceAccountFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*v1.ServiceAccount)
    if !ok {
        return nil, ItemNotSupported
    }
    client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create ServiceAccount")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type clusterRoleFactory struct{}

func (f *clusterRoleFactory) New() runtime.Object {
    return &rbac.ClusterRole{}
}

func (*clusterRoleFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.ClusterRole)
    if !ok {
        return nil, ItemNotSupported
    }

    // Impersonation is required for Kubernetes < 1.12, see
    // https://github.com/kubernetes/kubernetes/issues/62237#issuecomment-429315111
    //
    // This code is kept even for more recent Kubernetes, because users of
    // the framework outside of Kubernetes might run against an older version
    // of Kubernetes. It will be deprecated eventually.
    //
    // TODO: is this only needed for a ClusterRole or also for other non-namespaced
    // items?
    Logf("Creating an impersonating superuser kubernetes clientset to define cluster role")
    rc, err := LoadConfig()
    ExpectNoError(err)
    rc.Impersonate = restclient.ImpersonationConfig{
        UserName: "superuser",
        Groups: []string{"system:masters"},
    }
    superuserClientset, err := clientset.NewForConfig(rc)
    ExpectNoError(err, "create superuser clientset")

    client := superuserClientset.RbacV1().ClusterRoles()
    if _, err = client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create ClusterRole")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type clusterRoleBindingFactory struct{}

func (f *clusterRoleBindingFactory) New() runtime.Object {
    return &rbac.ClusterRoleBinding{}
}

func (*clusterRoleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.ClusterRoleBinding)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.RbacV1().ClusterRoleBindings()
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create ClusterRoleBinding")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type roleFactory struct{}

func (f *roleFactory) New() runtime.Object {
    return &rbac.Role{}
}

func (*roleFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.Role)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create Role")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type roleBindingFactory struct{}

func (f *roleBindingFactory) New() runtime.Object {
    return &rbac.RoleBinding{}
}

func (*roleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.RoleBinding)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create RoleBinding")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type serviceFactory struct{}

func (f *serviceFactory) New() runtime.Object {
    return &v1.Service{}
}

func (*serviceFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*v1.Service)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.CoreV1().Services(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create Service")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type statefulSetFactory struct{}

func (f *statefulSetFactory) New() runtime.Object {
    return &apps.StatefulSet{}
}

func (*statefulSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*apps.StatefulSet)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create StatefulSet")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type daemonSetFactory struct{}

func (f *daemonSetFactory) New() runtime.Object {
    return &apps.DaemonSet{}
}

func (*daemonSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*apps.DaemonSet)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create DaemonSet")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type storageClassFactory struct{}

func (f *storageClassFactory) New() runtime.Object {
    return &storage.StorageClass{}
}

func (*storageClassFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*storage.StorageClass)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.StorageV1().StorageClasses()
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create StorageClass")
    }
    return func() error {
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type secretFactory struct{}

func (f *secretFactory) New() runtime.Object {
    return &v1.Secret{}
}

func (*secretFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*v1.Secret)
    if !ok {
        return nil, ItemNotSupported
|
||||
}
|
||||
|
||||
client := f.ClientSet.CoreV1().Secrets(f.Namespace.GetName())
|
||||
if _, err := client.Create(item); err != nil {
|
||||
return nil, errors.Wrap(err, "create Secret")
|
||||
}
|
||||
return func() error {
|
||||
return client.Delete(item.GetName(), &metav1.DeleteOptions{})
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PrettyPrint returns a human-readable representation of an item.
|
||||
func PrettyPrint(item interface{}) string {
|
||||
data, err := json.MarshalIndent(item, "", " ")
|
||||
if err == nil {
|
||||
return string(data)
|
||||
}
|
||||
return fmt.Sprintf("%+v", item)
|
||||
}
|
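Every factory above follows the same Create contract: on success it hands back a func() error that deletes the object it just made, so a caller can stack those closures and unwind them in reverse order at teardown. A minimal, self-contained sketch of that unwinding pattern (the cleanupFunc type and runAll helper are illustrative, not framework API):

package main

import "fmt"

// cleanupFunc mirrors the func() error closure returned by each factory's Create.
type cleanupFunc func() error

// runAll invokes the accumulated cleanup closures in reverse creation order,
// the way a test teardown would undo the objects it created.
func runAll(cleanups []cleanupFunc) {
    for i := len(cleanups) - 1; i >= 0; i-- {
        if err := cleanups[i](); err != nil {
            fmt.Printf("cleanup %d failed: %v\n", i, err)
        }
    }
}

func main() {
    var cleanups []cleanupFunc
    // In a real test each entry would come from a factory's Create call.
    cleanups = append(cleanups, func() error { fmt.Println("delete ServiceAccount"); return nil })
    cleanups = append(cleanups, func() error { fmt.Println("delete ClusterRole"); return nil })
    runAll(cleanups)
}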
9
vendor/k8s.io/kubernetes/test/e2e/framework/deployment_util.go
generated
vendored
@ -17,6 +17,7 @@ limitations under the License.
package framework

import (
    "context"
    "fmt"
    "time"

@ -30,9 +31,11 @@ import (
    "k8s.io/apimachinery/pkg/watch"
    clientset "k8s.io/client-go/kubernetes"
    scaleclient "k8s.io/client-go/scale"
    watchtools "k8s.io/client-go/tools/watch"
    appsinternal "k8s.io/kubernetes/pkg/apis/apps"
    deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
    testutils "k8s.io/kubernetes/test/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*apps.Deployment, error) {
@ -172,7 +175,9 @@ func WatchRecreateDeployment(c clientset.Interface, d *apps.Deployment) error {
            d.Generation <= d.Status.ObservedGeneration, nil
    }

    _, err = watch.Until(2*time.Minute, w, condition)
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancel()
    _, err = watchtools.UntilWithoutRetry(ctx, w, condition)
    if err == wait.ErrWaitTimeout {
        err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
    }
@ -255,7 +260,7 @@ func MakeDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
            Containers: []v1.Container{
                {
                    Name:    "write-pod",
                    Image:   "busybox",
                    Image:   imageutils.GetE2EImage(imageutils.BusyBox),
                    Command: []string{"/bin/sh"},
                    Args:    []string{"-c", command},
                    SecurityContext: &v1.SecurityContext{
91
vendor/k8s.io/kubernetes/test/e2e/framework/flake_reporting_util.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "bytes"
    "fmt"
    "sync"
)

type FlakeReport struct {
    lock       sync.RWMutex
    Flakes     []string `json:"flakes"`
    FlakeCount int      `json:"flakeCount"`
}

func NewFlakeReport() *FlakeReport {
    return &FlakeReport{
        Flakes: []string{},
    }
}

func buildDescription(optionalDescription ...interface{}) string {
    switch len(optionalDescription) {
    case 0:
        return ""
    default:
        return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...)
    }
}

// RecordFlakeIfError records the error (if non-nil) as a flake along with an optional description.
// This can be used as a replacement for framework.ExpectNoError() for non-critical errors that can
// be considered 'flakes' to avoid causing failures in tests.
func (f *FlakeReport) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
    if err == nil {
        return
    }
    msg := fmt.Sprintf("Unexpected error occurred: %v", err)
    desc := buildDescription(optionalDescription...)
    if desc != "" {
        msg = fmt.Sprintf("%v (Description: %v)", msg, desc)
    }
    Logf(msg)
    f.lock.Lock()
    defer f.lock.Unlock()
    f.Flakes = append(f.Flakes, msg)
    f.FlakeCount++
}

func (f *FlakeReport) GetFlakeCount() int {
    f.lock.RLock()
    defer f.lock.RUnlock()
    return f.FlakeCount
}

func (f *FlakeReport) PrintHumanReadable() string {
    f.lock.RLock()
    defer f.lock.RUnlock()
    buf := bytes.Buffer{}
    buf.WriteString(fmt.Sprintf("FlakeCount: %v\n", f.FlakeCount))
    buf.WriteString("Flakes:\n")
    for _, flake := range f.Flakes {
        buf.WriteString(fmt.Sprintf("%v\n", flake))
    }
    return buf.String()
}

func (f *FlakeReport) PrintJSON() string {
    f.lock.RLock()
    defer f.lock.RUnlock()
    return PrettyPrintJSON(f)
}

func (f *FlakeReport) SummaryKind() string {
    return "FlakeReport"
}
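The report is driven through RecordFlakeIfError, which is a tolerant stand-in for ExpectNoError: a nil error is a no-op, a non-nil error is logged and counted instead of failing the test. A short usage sketch (the helper name and format arguments are illustrative):

package example

import (
    "k8s.io/kubernetes/test/e2e/framework"
)

// recordNonCritical shows the intended call pattern: a best-effort
// operation's error is recorded as a flake rather than failing the test.
func recordNonCritical(report *framework.FlakeReport, cleanupErr error) {
    report.RecordFlakeIfError(cleanupErr, "best-effort cleanup of namespace %q", "e2e-tests-example")
}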
115
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go
generated
vendored
@ -14,19 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Package framework contains provider-independent helper code for
// building and running E2E tests with Ginkgo. The actual Ginkgo test
// suites get assembled by combining this framework, the optional
// provider support code and specific tests via a separate .go file
// like Kubernetes' test/e2e.go.
package framework

import (
    "bufio"
    "bytes"
    "fmt"
    "math/rand"
    "os"
    "path"
    "strings"
    "sync"
    "time"

    "k8s.io/api/core/v1"
    apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
@ -36,16 +42,14 @@ import (
    "k8s.io/client-go/discovery"
    cacheddiscovery "k8s.io/client-go/discovery/cached"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/informers"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/restmapper"
    scaleclient "k8s.io/client-go/scale"
    "k8s.io/client-go/tools/clientcmd"
    csi "k8s.io/csi-api/pkg/client/clientset/versioned"
    aggregatorclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/kubemark"
    "k8s.io/kubernetes/test/e2e/framework/metrics"
    testutils "k8s.io/kubernetes/test/utils"

@ -65,8 +69,15 @@ const (
type Framework struct {
    BaseName string

    // Set together with creating the ClientSet and the namespace.
    // Guaranteed to be unique in the cluster even when running the same
    // test multiple times in parallel.
    UniqueName string

    ClientSet                        clientset.Interface
    KubemarkExternalClusterClientSet clientset.Interface
    APIExtensionsClientSet           apiextensionsclient.Interface
    CSIClientSet                     csi.Interface

    InternalClientset *internalclientset.Clientset
    AggregatorClient  *aggregatorclient.Clientset
@ -90,6 +101,9 @@ type Framework struct {
    logsSizeCloseChannel chan bool
    logsSizeVerifier     *LogsSizeVerifier

    // Flaky operation failures in an e2e test can be captured through this.
    flakeReport *FlakeReport

    // To make sure that this framework cleans up after itself, no matter what,
    // we install a Cleanup action before each test and clear it after. If we
    // should abort, the AfterSuite hook should run all Cleanup actions.
@ -102,8 +116,6 @@ type Framework struct {
    // or stdout if ReportDir is not set once test ends.
    TestSummaries []TestDataSummary

    kubemarkControllerCloseChannel chan struct{}

    // Place to keep ClusterAutoscaler metrics from before test in order to compute delta.
    clusterAutoscalerMetricsBeforeTest metrics.MetricsCollection
}
@ -152,6 +164,15 @@ func (f *Framework) BeforeEach() {
    if f.ClientSet == nil {
        By("Creating a kubernetes client")
        config, err := LoadConfig()
        testDesc := CurrentGinkgoTestDescription()
        if len(testDesc.ComponentTexts) > 0 {
            componentTexts := strings.Join(testDesc.ComponentTexts, " ")
            config.UserAgent = fmt.Sprintf(
                "%v -- %v",
                rest.DefaultKubernetesUserAgent(),
                componentTexts)
        }

        Expect(err).NotTo(HaveOccurred())
        config.QPS = f.Options.ClientQPS
        config.Burst = f.Options.ClientBurst
@ -163,12 +184,19 @@ func (f *Framework) BeforeEach() {
        }
        f.ClientSet, err = clientset.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.APIExtensionsClientSet, err = apiextensionsclient.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.InternalClientset, err = internalclientset.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.AggregatorClient, err = aggregatorclient.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.DynamicClient, err = dynamic.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        // csi.storage.k8s.io is based on CRD, which is served only as JSON
        jsonConfig := config
        jsonConfig.ContentType = "application/json"
        f.CSIClientSet, err = csi.NewForConfig(jsonConfig)
        Expect(err).NotTo(HaveOccurred())

        // create scales getter, set GroupVersion and NegotiatedSerializer to default values
        // as they are required when creating a REST client.
@ -188,29 +216,11 @@ func (f *Framework) BeforeEach() {
        resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
        f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)

        if ProviderIs("kubemark") && TestContext.KubemarkExternalKubeConfig != "" && TestContext.CloudConfig.KubemarkController == nil {
            externalConfig, err := clientcmd.BuildConfigFromFlags("", TestContext.KubemarkExternalKubeConfig)
            externalConfig.QPS = f.Options.ClientQPS
            externalConfig.Burst = f.Options.ClientBurst
            Expect(err).NotTo(HaveOccurred())
            externalClient, err := clientset.NewForConfig(externalConfig)
            Expect(err).NotTo(HaveOccurred())
            f.KubemarkExternalClusterClientSet = externalClient
            f.kubemarkControllerCloseChannel = make(chan struct{})
            externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
            kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
            kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
            go kubemarkNodeInformer.Informer().Run(f.kubemarkControllerCloseChannel)
            TestContext.CloudConfig.KubemarkController, err = kubemark.NewKubemarkController(f.KubemarkExternalClusterClientSet, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
            Expect(err).NotTo(HaveOccurred())
            externalInformerFactory.Start(f.kubemarkControllerCloseChannel)
            Expect(TestContext.CloudConfig.KubemarkController.WaitForCacheSync(f.kubemarkControllerCloseChannel)).To(BeTrue())
            go TestContext.CloudConfig.KubemarkController.Run(f.kubemarkControllerCloseChannel)
        }
        TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
    }

    if !f.SkipNamespaceCreation {
        By("Building a namespace api object")
        By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
        namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
            "e2e-framework": f.BaseName,
        })
@ -225,13 +235,27 @@ func (f *Framework) BeforeEach() {
        } else {
            Logf("Skipping waiting for service account")
        }
        f.UniqueName = f.Namespace.GetName()
    } else {
        // not guaranteed to be unique, but very likely
        f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
    }

    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
        var err error
        var nodeMode NodesSet
        switch TestContext.GatherKubeSystemResourceUsageData {
        case "master":
            nodeMode = MasterNodes
        case "masteranddns":
            nodeMode = MasterAndDNSNodes
        default:
            nodeMode = AllNodes
        }

        f.gatherer, err = NewResourceUsageGatherer(f.ClientSet, ResourceGathererOptions{
            InKubemark:                  ProviderIs("kubemark"),
            MasterOnly:                  TestContext.GatherKubeSystemResourceUsageData == "master",
            Nodes:                       nodeMode,
            ResourceDataGatheringPeriod: 60 * time.Second,
            ProbeDuration:               15 * time.Second,
            PrintVerboseLogs:            false,
@ -269,6 +293,8 @@ func (f *Framework) BeforeEach() {
        }

    }

    f.flakeReport = NewFlakeReport()
}

// AfterEach deletes the namespace, after reading its events.
@ -322,29 +348,10 @@ func (f *Framework) AfterEach() {

    // Print events if the test failed.
    if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
        // Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
        // Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client.
        if !f.SkipNamespaceCreation {
            DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
        }

        logFunc := Logf
        if TestContext.ReportDir != "" {
            filePath := path.Join(TestContext.ReportDir, "image-puller.txt")
            file, err := os.Create(filePath)
            if err != nil {
                By(fmt.Sprintf("Failed to create a file with image-puller data %v: %v\nPrinting to stdout", filePath, err))
            } else {
                By(fmt.Sprintf("Dumping a list of prepulled images on each node to file %v", filePath))
                defer file.Close()
                if err = file.Chmod(0644); err != nil {
                    Logf("Failed to chmod to 644 of %v: %v", filePath, err)
                }
                logFunc = GetLogToFileFunc(file)
            }
        } else {
            By("Dumping a list of prepulled images on each node...")
        }
        LogContainersInPodsWithLabels(f.ClientSet, metav1.NamespaceSystem, ImagePullerLabels, "image-puller", logFunc)
    }

    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
@ -378,8 +385,12 @@ func (f *Framework) AfterEach() {
        }
    }

    if TestContext.CloudConfig.KubemarkController != nil {
        close(f.kubemarkControllerCloseChannel)
    TestContext.CloudConfig.Provider.FrameworkAfterEach(f)

    // Report any flakes that were observed in the e2e test and reset.
    if f.flakeReport != nil && f.flakeReport.GetFlakeCount() > 0 {
        f.TestSummaries = append(f.TestSummaries, f.flakeReport)
        f.flakeReport = nil
    }

    PrintSummaries(f.TestSummaries, f.BaseName)
@ -409,6 +420,10 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
    return ns, err
}

func (f *Framework) RecordFlakeIfError(err error, optionalDescription ...interface{}) {
    f.flakeReport.RecordFlakeIfError(err, optionalDescription...)
}

// AddNamespacesToDelete adds one or more namespaces to be deleted when the test
// completes.
func (f *Framework) AddNamespacesToDelete(namespaces ...*v1.Namespace) {
@ -533,7 +548,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
            return nil
        } else {
            return []v1.ServicePort{{
                Protocol:   "TCP",
                Protocol:   v1.ProtocolTCP,
                Port:       int32(svcPort),
                TargetPort: intstr.FromInt(contPort),
            }}
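With this change the kubemark-specific wiring disappears behind per-provider hooks invoked from BeforeEach and AfterEach. A provider that needs no per-test work can supply empty hooks; the sketch below shows only the two hook methods referenced here and assumes the real provider interface contains more:

package example

import "k8s.io/kubernetes/test/e2e/framework"

// NullFrameworkHooks sketches a provider that opts out of per-test setup.
// Only the two framework hooks called from the diff above are shown; the
// full provider interface is assumed to be larger.
type NullFrameworkHooks struct{}

func (n NullFrameworkHooks) FrameworkBeforeEach(f *framework.Framework) {}
func (n NullFrameworkHooks) FrameworkAfterEach(f *framework.Framework)  {}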
16
vendor/k8s.io/kubernetes/test/e2e/framework/gpu_util.go
generated
vendored
@ -22,6 +22,7 @@ import (
    "k8s.io/apimachinery/pkg/util/uuid"

    . "github.com/onsi/gomega"
    "k8s.io/klog"
)

const (
@ -50,13 +51,13 @@ func NumberOfNVIDIAGPUs(node *v1.Node) int64 {
}

// NVIDIADevicePlugin returns the official Google Device Plugin pod for NVIDIA GPU in GKE
func NVIDIADevicePlugin(ns string) *v1.Pod {
func NVIDIADevicePlugin() *v1.Pod {
    ds, err := DsFromManifest(GPUDevicePluginDSYAML)
    Expect(err).NotTo(HaveOccurred())
    p := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
            Namespace: ns,
            Namespace: metav1.NamespaceSystem,
        },

        Spec: ds.Spec.Template.Spec,
@ -69,7 +70,16 @@ func NVIDIADevicePlugin(ns string) *v1.Pod {

func GetGPUDevicePluginImage() string {
    ds, err := DsFromManifest(GPUDevicePluginDSYAML)
    if err != nil || ds == nil || len(ds.Spec.Template.Spec.Containers) < 1 {
    if err != nil {
        klog.Errorf("Failed to parse the device plugin image: %v", err)
        return ""
    }
    if ds == nil {
        klog.Errorf("Failed to parse the device plugin image: the extracted DaemonSet is nil")
        return ""
    }
    if len(ds.Spec.Template.Spec.Containers) < 1 {
        klog.Errorf("Failed to parse the device plugin image: cannot extract the container from YAML")
        return ""
    }
    return ds.Spec.Template.Spec.Containers[0].Image
42
vendor/k8s.io/kubernetes/test/e2e/framework/ingress/BUILD
generated
vendored
Normal file
@ -0,0 +1,42 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["ingress_utils.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/ingress",
    visibility = ["//visibility:public"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/utils:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
52
vendor/k8s.io/kubernetes/test/e2e/framework/jobs_util.go
generated
vendored
@ -27,6 +27,7 @@ import (
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/wait"
    clientset "k8s.io/client-go/kubernetes"
    jobutil "k8s.io/kubernetes/pkg/controller/job"
)

const (
@ -181,8 +182,8 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
    })
}

// WaitForJobFinish uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
// WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
    return wait.Poll(Poll, JobTimeout, func() (bool, error) {
        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
        if err != nil {
@ -192,6 +193,17 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int
    })
}

// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return jobutil.IsJobFinished(curr), nil
    })
}

// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
    return wait.Poll(Poll, timeout, func() (bool, error) {
@ -239,6 +251,18 @@ func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parall
    return count == parallelism, nil
}

// WaitForAllJobPodsGone waits for all pods for the Job named jobName in namespace ns
// to be deleted.
func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
        pods, err := GetJobPods(c, ns, jobName)
        if err != nil {
            return false, err
        }
        return len(pods.Items) == 0, nil
    })
}

func newBool(val bool) *bool {
    p := new(bool)
    *p = val
@ -250,7 +274,7 @@ type updateJobFunc func(*batch.Job)
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
    jobs := c.BatchV1().Jobs(namespace)
    var updateErr error
    pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
    pollErr := wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
        if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
            return false, err
        }
@ -268,3 +292,25 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
    }
    return job, pollErr
}

// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
// a non-nil deletionTimestamp (i.e. being deleted).
func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
    return wait.PollImmediate(Poll, JobTimeout, func() (bool, error) {
        curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return curr.ObjectMeta.DeletionTimestamp != nil, nil
    })
}

func JobFinishTime(finishedJob *batch.Job) metav1.Time {
    var finishTime metav1.Time
    for _, c := range finishedJob.Status.Conditions {
        if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
            return c.LastTransitionTime
        }
    }
    return finishTime
}
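The renaming splits two distinct waits: WaitForJobComplete blocks until the job reports the requested number of completions, while the new WaitForJobFinish returns once the job carries any terminal condition, Complete or Failed. A small sketch of choosing between them (helper names are illustrative):

package example

import (
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// waitJobSucceeded blocks until the job has the desired number of completions.
func waitJobSucceeded(c clientset.Interface, ns, name string, completions int32) error {
    return framework.WaitForJobComplete(c, ns, name, completions)
}

// waitJobTerminal blocks until the job reaches any terminal state,
// whether it succeeded or failed.
func waitJobTerminal(c clientset.Interface, ns, name string) error {
    return framework.WaitForJobFinish(c, ns, name)
}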
2
vendor/k8s.io/kubernetes/test/e2e/framework/kubelet_stats.go
generated
vendored
@ -492,7 +492,7 @@ type usageDataPerContainer struct {
}

func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
    client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
    client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
    if err != nil {
        return "", err
    }
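NodeProxyRequest now takes the target port explicitly instead of assuming the kubelet default, so the same helper can address other node debug endpoints. A hedged sketch of a caller, assuming (as GetKubeletHeapStats does) that the returned result exposes Raw():

package example

import (
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/pkg/master/ports"
    "k8s.io/kubernetes/test/e2e/framework"
)

// kubeletGoroutineDump fetches a goroutine dump from a node's kubelet,
// passing the kubelet port explicitly per the updated signature.
func kubeletGoroutineDump(c clientset.Interface, nodeName string) (string, error) {
    result, err := framework.NodeProxyRequest(c, nodeName, "debug/pprof/goroutine?debug=2", ports.KubeletPort)
    if err != nil {
        return "", err
    }
    raw, err := result.Raw()
    if err != nil {
        return "", err
    }
    return string(raw), nil
}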
8
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/BUILD
generated
vendored
@ -21,12 +21,12 @@ go_library(
        "//pkg/apis/core:go_default_library",
        "//pkg/master/ports:go_default_library",
        "//pkg/util/system:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/github.com/prometheus/common/expfmt:go_default_library",
        "//vendor/github.com/prometheus/common/model:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

4
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/generic_metrics.go
generated
vendored
@ -22,9 +22,9 @@ import (
    "reflect"
    "strings"

    "github.com/golang/glog"
    "github.com/prometheus/common/expfmt"
    "github.com/prometheus/common/model"
    "k8s.io/klog"
)

type Metrics map[string]model.Samples
@ -88,7 +88,7 @@ func parseMetrics(data string, output *Metrics) error {
            // Expected loop termination condition.
            return nil
        }
        glog.Warningf("Invalid Decode. Skipping.")
        klog.Warningf("Invalid Decode. Skipping.")
        continue
    }
    for _, metric := range v {
10
vendor/k8s.io/kubernetes/test/e2e/framework/metrics/metrics_grabber.go
generated
vendored
@ -27,7 +27,7 @@ import (
    "k8s.io/kubernetes/pkg/master/ports"
    "k8s.io/kubernetes/pkg/util/system"

    "github.com/golang/glog"
    "k8s.io/klog"
)

const (
@ -62,7 +62,7 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
        return nil, err
    }
    if len(nodeList.Items) < 1 {
        glog.Warning("Can't find any Nodes in the API server to grab metrics from")
        klog.Warning("Can't find any Nodes in the API server to grab metrics from")
    }
    for _, node := range nodeList.Items {
        if system.IsMasterNode(node.Name) {
@ -76,9 +76,9 @@ func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets b
        controllers = false
        clusterAutoscaler = ec != nil
        if clusterAutoscaler {
            glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
            klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager is disabled.")
        } else {
            glog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
            klog.Warningf("Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.")
        }
    }

@ -127,7 +127,7 @@ func (g *MetricsGrabber) GrabFromScheduler() (SchedulerMetrics, error) {
    if !g.registeredMaster {
        return SchedulerMetrics{}, fmt.Errorf("Master's Kubelet is not registered. Skipping Scheduler's metrics gathering.")
    }
    output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.SchedulerPort)
    output, err := g.getMetricsFromPod(g.client, fmt.Sprintf("%v-%v", "kube-scheduler", g.masterName), metav1.NamespaceSystem, ports.InsecureSchedulerPort)
    if err != nil {
        return SchedulerMetrics{}, err
    }
167
vendor/k8s.io/kubernetes/test/e2e/framework/metrics_util.go
generated
vendored
@ -23,9 +23,11 @@ import (
    "fmt"
    "io"
    "math"
    "reflect"
    "sort"
    "strconv"
    "strings"
    "sync"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -232,6 +234,143 @@ func (l *SchedulingMetrics) PrintJSON() string {
    return PrettyPrintJSON(l)
}

type Histogram struct {
    Labels  map[string]string `json:"labels"`
    Buckets map[string]int    `json:"buckets"`
}

type HistogramVec []Histogram

func newHistogram(labels map[string]string) *Histogram {
    return &Histogram{
        Labels:  labels,
        Buckets: make(map[string]int),
    }
}

type EtcdMetrics struct {
    BackendCommitDuration     HistogramVec `json:"backendCommitDuration"`
    SnapshotSaveTotalDuration HistogramVec `json:"snapshotSaveTotalDuration"`
    PeerRoundTripTime         HistogramVec `json:"peerRoundTripTime"`
    WalFsyncDuration          HistogramVec `json:"walFsyncDuration"`
    MaxDatabaseSize           float64      `json:"maxDatabaseSize"`
}

func newEtcdMetrics() *EtcdMetrics {
    return &EtcdMetrics{
        BackendCommitDuration:     make(HistogramVec, 0),
        SnapshotSaveTotalDuration: make(HistogramVec, 0),
        PeerRoundTripTime:         make(HistogramVec, 0),
        WalFsyncDuration:          make(HistogramVec, 0),
    }
}

func (l *EtcdMetrics) SummaryKind() string {
    return "EtcdMetrics"
}

func (l *EtcdMetrics) PrintHumanReadable() string {
    return PrettyPrintJSON(l)
}

func (l *EtcdMetrics) PrintJSON() string {
    return PrettyPrintJSON(l)
}

type EtcdMetricsCollector struct {
    stopCh  chan struct{}
    wg      *sync.WaitGroup
    metrics *EtcdMetrics
}

func NewEtcdMetricsCollector() *EtcdMetricsCollector {
    return &EtcdMetricsCollector{
        stopCh:  make(chan struct{}),
        wg:      &sync.WaitGroup{},
        metrics: newEtcdMetrics(),
    }
}

func getEtcdMetrics() ([]*model.Sample, error) {
    // Etcd is only exposed on localhost level. We are using ssh method
    if TestContext.Provider == "gke" {
        Logf("Not grabbing scheduler metrics through master SSH: unsupported for gke")
        return nil, nil
    }

    cmd := "curl http://localhost:2379/metrics"
    sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
    if err != nil || sshResult.Code != 0 {
        return nil, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
    }
    data := sshResult.Stdout

    return extractMetricSamples(data)
}

func getEtcdDatabaseSize() (float64, error) {
    samples, err := getEtcdMetrics()
    if err != nil {
        return 0, err
    }
    for _, sample := range samples {
        if sample.Metric[model.MetricNameLabel] == "etcd_debugging_mvcc_db_total_size_in_bytes" {
            return float64(sample.Value), nil
        }
    }
    return 0, fmt.Errorf("Couldn't find etcd database size metric")
}

// StartCollecting starts to collect etcd db size metric periodically
// and updates MaxDatabaseSize accordingly.
func (mc *EtcdMetricsCollector) StartCollecting(interval time.Duration) {
    mc.wg.Add(1)
    go func() {
        defer mc.wg.Done()
        for {
            select {
            case <-time.After(interval):
                dbSize, err := getEtcdDatabaseSize()
                if err != nil {
                    Logf("Failed to collect etcd database size")
                    continue
                }
                mc.metrics.MaxDatabaseSize = math.Max(mc.metrics.MaxDatabaseSize, dbSize)
            case <-mc.stopCh:
                return
            }
        }
    }()
}

func (mc *EtcdMetricsCollector) StopAndSummarize() error {
    close(mc.stopCh)
    mc.wg.Wait()

    // Do some one-off collection of metrics.
    samples, err := getEtcdMetrics()
    if err != nil {
        return err
    }
    for _, sample := range samples {
        switch sample.Metric[model.MetricNameLabel] {
        case "etcd_disk_backend_commit_duration_seconds_bucket":
            convertSampleToBucket(sample, &mc.metrics.BackendCommitDuration)
        case "etcd_debugging_snap_save_total_duration_seconds_bucket":
            convertSampleToBucket(sample, &mc.metrics.SnapshotSaveTotalDuration)
        case "etcd_disk_wal_fsync_duration_seconds_bucket":
            convertSampleToBucket(sample, &mc.metrics.WalFsyncDuration)
        case "etcd_network_peer_round_trip_time_seconds_bucket":
            convertSampleToBucket(sample, &mc.metrics.PeerRoundTripTime)
        }
    }
    return nil
}

func (mc *EtcdMetricsCollector) GetMetrics() *EtcdMetrics {
    return mc.metrics
}

type SaturationTime struct {
    TimeToSaturate time.Duration `json:"timeToSaturate"`
    NumberOfNodes  int           `json:"numberOfNodes"`
@ -472,7 +611,7 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
        Context(ctx).
        Namespace(metav1.NamespaceSystem).
        Resource("pods").
        Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
        Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.InsecureSchedulerPort)).
        SubResource("proxy").
        Suffix("metrics").
        Do().Raw()
@ -500,6 +639,9 @@ func sendRestRequestToScheduler(c clientset.Interface, op string) (string, error
func getSchedulingLatency(c clientset.Interface) (*SchedulingMetrics, error) {
    result := SchedulingMetrics{}
    data, err := sendRestRequestToScheduler(c, "GET")
    if err != nil {
        return nil, err
    }

    samples, err := extractMetricSamples(data)
    if err != nil {
@ -546,12 +688,33 @@ func VerifySchedulerLatency(c clientset.Interface) (*SchedulingMetrics, error) {

func ResetSchedulerMetrics(c clientset.Interface) error {
    responseText, err := sendRestRequestToScheduler(c, "DELETE")
    if err != nil || responseText != "metrics reset\n" {
    if err != nil {
        return fmt.Errorf("Unexpected response: %q", responseText)
    }
    return nil
}

func convertSampleToBucket(sample *model.Sample, h *HistogramVec) {
    labels := make(map[string]string)
    for k, v := range sample.Metric {
        if k != "le" {
            labels[string(k)] = string(v)
        }
    }
    var hist *Histogram
    for i := range *h {
        if reflect.DeepEqual(labels, (*h)[i].Labels) {
            hist = &((*h)[i])
            break
        }
    }
    if hist == nil {
        hist = newHistogram(labels)
        *h = append(*h, *hist)
    }
    hist.Buckets[string(sample.Metric["le"])] = int(sample.Value)
}

func PrettyPrintJSON(metrics interface{}) string {
    output := &bytes.Buffer{}
    if err := json.NewEncoder(output).Encode(metrics); err != nil {
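The collector is meant to bracket a measured workload: start periodic database-size polling, run the workload, then stop and do the one-off histogram collection. A usage sketch (the 30-second interval is an arbitrary choice for illustration):

package example

import (
    "time"

    "k8s.io/kubernetes/test/e2e/framework"
)

// collectEtcdMetrics polls the etcd database size every 30s while the
// workload runs, then stops, gathers the histogram buckets once, and
// returns the summary.
func collectEtcdMetrics(runWorkload func()) (*framework.EtcdMetrics, error) {
    mc := framework.NewEtcdMetricsCollector()
    mc.StartCollecting(30 * time.Second)
    runWorkload()
    if err := mc.StopAndSummarize(); err != nil {
        return nil, err
    }
    return mc.GetMetrics(), nil
}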
19
vendor/k8s.io/kubernetes/test/e2e/framework/networking_utils.go
generated
vendored
@ -292,13 +292,13 @@ func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targ
    if protocol == "udp" {
        // TODO: It would be enough to pass 1s+epsilon to timeout, but unfortunately
        // busybox timeout doesn't support non-integer values.
        cmd = fmt.Sprintf("echo 'hostName' | timeout -t 2 nc -w 1 -u %s %d", targetIP, targetPort)
        cmd = fmt.Sprintf("echo 'hostName' | nc -w 1 -u %s %d", targetIP, targetPort)
    } else {
        ipPort := net.JoinHostPort(targetIP, strconv.Itoa(targetPort))
        // The current versions of curl included in CentOS and RHEL distros
        // misinterpret square brackets around IPv6 as globbing, so use the -g
        // argument to disable globbing to handle the IPv6 case.
        cmd = fmt.Sprintf("timeout -t 15 curl -g -q -s --connect-timeout 1 http://%s/hostName", ipPort)
        cmd = fmt.Sprintf("curl -g -q -s --max-time 15 --connect-timeout 1 http://%s/hostName", ipPort)
    }

    // TODO: This simply tells us that we can reach the endpoints. Check that
@ -948,8 +948,11 @@ func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout
// This function executes commands on a node so it will work only for some
// environments.
func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
    host := GetNodeExternalIP(node)
    master := GetMasterAddress(c)
    host, err := GetNodeExternalIP(node)
    if err != nil {
        Failf("Error getting node external ip : %v", err)
    }
    masterAddresses := GetAllMasterAddresses(c)
    By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
    defer func() {
        // This code will execute even if setting the iptables rule failed.
@ -957,14 +960,18 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
        // had been inserted. (yes, we could look at the error code and ssh error
        // separately, but I prefer to stay on the safe side).
        By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
        UnblockNetwork(host, master)
        for _, masterAddress := range masterAddresses {
            UnblockNetwork(host, masterAddress)
        }
    }()

    Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
    if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
        Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
    }
    BlockNetwork(host, master)
    for _, masterAddress := range masterAddresses {
        BlockNetwork(host, masterAddress)
    }

    Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
    if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
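Callers wrap their partition assertions in the testFunc callback; blocking and unblocking of every master address now happens around it, and the deferred unblock still runs if the callback fails. A sketch of a call site:

package example

import (
    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// isolateAndObserve blocks traffic from the node to all master addresses
// for the duration of the callback, then restores it.
func isolateAndObserve(c clientset.Interface, ns string, node *v1.Node) {
    framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
        // Assertions about partition behavior go here, for example waiting
        // for the node to be marked NotReady and for pods to be evicted.
    })
}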
4
vendor/k8s.io/kubernetes/test/e2e/framework/nodes_util.go
generated
vendored
@ -63,7 +63,7 @@ func etcdUpgradeGCE(target_storage, target_version string) error {
        os.Environ(),
        "TEST_ETCD_VERSION="+target_version,
        "STORAGE_BACKEND="+target_storage,
        "TEST_ETCD_IMAGE=3.2.18-0")
        "TEST_ETCD_IMAGE=3.2.24-1")

    _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
    return err
@ -103,7 +103,7 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
        env = append(env,
            "TEST_ETCD_VERSION="+TestContext.EtcdUpgradeVersion,
            "STORAGE_BACKEND="+TestContext.EtcdUpgradeStorage,
            "TEST_ETCD_IMAGE=3.2.18-0")
            "TEST_ETCD_IMAGE=3.2.24-1")
    } else {
        // In e2e tests, we skip the confirmation prompt about
        // implicit etcd upgrades to simulate the user entering "y".
28
vendor/k8s.io/kubernetes/test/e2e/framework/podlogs/BUILD
generated
vendored
Normal file
@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["podlogs.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/podlogs",
    visibility = ["//visibility:public"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
263
vendor/k8s.io/kubernetes/test/e2e/framework/podlogs/podlogs.go
generated
vendored
Normal file
@ -0,0 +1,263 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package podlogs enables live capturing of all events and log
// messages for some or all pods in a namespace as they get generated.
// This helps debugging both a running test (what is currently going
// on?) and the output of a CI run (events appear in chronological
// order and output that normally isn't available like the command
// stdout messages are available).
package podlogs

import (
    "bufio"
    "bytes"
    "context"
    "fmt"
    "io"
    "os"
    "path"
    "regexp"
    "strings"
    "sync"

    "github.com/pkg/errors"
    "k8s.io/api/core/v1"
    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
)

// LogsForPod starts reading the logs for a certain pod. If the pod has more than one
// container, opts.Container must be set. Reading stops when the context is done.
// The stream includes formatted error messages and ends with
//    rpc error: code = Unknown desc = Error: No such container: 41a...
// when the pod gets deleted while streaming.
func LogsForPod(ctx context.Context, cs clientset.Interface, ns, pod string, opts *v1.PodLogOptions) (io.ReadCloser, error) {
    req := cs.Core().Pods(ns).GetLogs(pod, opts)
    return req.Context(ctx).Stream()
}

// LogOutput determines where output from CopyAllLogs goes.
type LogOutput struct {
    // If not nil, errors will be logged here.
    StatusWriter io.Writer

    // If not nil, all output goes to this writer with "<pod>/<container>:" as prefix.
    LogWriter io.Writer

    // Base directory for one log file per container.
    // The full path of each log file will be <log path prefix><pod>-<container>.log.
    LogPathPrefix string
}

// Matches harmless errors from pkg/kubelet/kubelet_pods.go.
var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|waiting to start|not available)|the server could not find the requested resource`)

// CopyAllLogs follows the logs of all containers in all pods,
// including those that get created in the future, and writes each log
// line as configured in the output options. It does that until the
// context is done or until an error occurs.
//
// Beware that there is currently no way to force log collection
// before removing pods, which means that there is a known race
// between "stop pod" and "collecting log entries". The alternative
// would be a blocking function which collects logs from all currently
// running pods, but that then would have the disadvantage that
// already deleted pods aren't covered.
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
    watcher, err := cs.Core().Pods(ns).Watch(meta.ListOptions{})
    if err != nil {
        return errors.Wrap(err, "cannot create Pod event watcher")
    }

    go func() {
        var m sync.Mutex
        logging := map[string]bool{}
        check := func() {
            m.Lock()
            defer m.Unlock()

            pods, err := cs.Core().Pods(ns).List(meta.ListOptions{})
            if err != nil {
                if to.StatusWriter != nil {
                    fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err)
                }
                return
            }

            for _, pod := range pods.Items {
                for _, c := range pod.Spec.Containers {
                    name := pod.ObjectMeta.Name + "/" + c.Name
                    if logging[name] {
                        continue
                    }
                    readCloser, err := LogsForPod(ctx, cs, ns, pod.ObjectMeta.Name,
                        &v1.PodLogOptions{
                            Container: c.Name,
                            Follow:    true,
                        })
                    if err != nil {
                        // We do get "normal" errors here, like trying to read too early.
                        // We can ignore those.
                        if to.StatusWriter != nil &&
                            expectedErrors.FindStringIndex(err.Error()) == nil {
                            fmt.Fprintf(to.StatusWriter, "WARNING: pod log: %s: %s\n", name, err)
                        }
                        continue
                    }

                    // Determine where we write. If this fails, we intentionally return without clearing
                    // the logging[name] flag, which prevents trying over and over again to
                    // create the output file.
                    var out io.Writer
                    var closer io.Closer
                    var prefix string
                    if to.LogWriter != nil {
                        out = to.LogWriter
                        prefix = name + ": "
                    } else {
                        var err error
                        filename := to.LogPathPrefix + pod.ObjectMeta.Name + "-" + c.Name + ".log"
                        err = os.MkdirAll(path.Dir(filename), 0755)
                        if err != nil {
                            if to.StatusWriter != nil {
                                fmt.Fprintf(to.StatusWriter, "ERROR: pod log: create directory for %s: %s\n", filename, err)
                            }
                            return
                        }
                        // The test suite might run the same test multiple times,
                        // so we have to append here.
                        file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
                        if err != nil {
                            if to.StatusWriter != nil {
                                fmt.Fprintf(to.StatusWriter, "ERROR: pod log: create file %s: %s\n", filename, err)
                            }
                            return
                        }
                        closer = file
                        out = file
                    }
                    go func() {
                        if closer != nil {
                            defer closer.Close()
                        }
                        defer func() {
                            m.Lock()
                            logging[name] = false
                            m.Unlock()
                            readCloser.Close()
                        }()
                        scanner := bufio.NewScanner(readCloser)
                        first := true
                        for scanner.Scan() {
                            line := scanner.Text()
                            // Filter out the expected "end of stream" error message,
                            // it would just confuse developers who don't know about it.
                            // Same for attempts to read logs from a container that
                            // isn't ready (yet?!).
                            if !strings.HasPrefix(line, "rpc error: code = Unknown desc = Error: No such container:") &&
                                !strings.HasPrefix(line, "Unable to retrieve container logs for ") {
                                if first {
                                    if to.LogWriter == nil {
                                        // Because the same log might be written to multiple times
                                        // in different test instances, log an extra line to separate them.
                                        // Also provides some useful extra information.
                                        fmt.Fprintf(out, "==== start of log for container %s ====\n", name)
                                    }
                                    first = false
                                }
                                fmt.Fprintf(out, "%s%s\n", prefix, scanner.Text())
                            }
                        }
                    }()
                    logging[name] = true
                }
            }
        }

        // Watch events to see whether we can start logging
        // and log interesting ones.
        check()
        for {
            select {
            case <-watcher.ResultChan():
                check()
            case <-ctx.Done():
                return
            }
        }
    }()

    return nil
}

// WatchPods prints pod status events for a certain namespace or all namespaces
// when namespace name is empty.
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
    watcher, err := cs.Core().Pods(ns).Watch(meta.ListOptions{})
    if err != nil {
        return errors.Wrap(err, "cannot create Pod event watcher")
    }

    go func() {
        defer watcher.Stop()
        for {
            select {
            case e := <-watcher.ResultChan():
                if e.Object == nil {
                    continue
                }

                pod, ok := e.Object.(*v1.Pod)
                if !ok {
                    continue
                }
                buffer := new(bytes.Buffer)
                fmt.Fprintf(buffer,
                    "pod event: %s: %s/%s %s: %s %s\n",
                    e.Type,
                    pod.Namespace,
                    pod.Name,
                    pod.Status.Phase,
                    pod.Status.Reason,
                    pod.Status.Conditions,
                )
                for _, cst := range pod.Status.ContainerStatuses {
                    fmt.Fprintf(buffer, "   %s: ", cst.Name)
                    if cst.State.Waiting != nil {
                        fmt.Fprintf(buffer, "WAITING: %s - %s",
                            cst.State.Waiting.Reason,
                            cst.State.Waiting.Message,
                        )
                    } else if cst.State.Running != nil {
                        fmt.Fprintf(buffer, "RUNNING")
                    } else if cst.State.Terminated != nil {
                        fmt.Fprintf(buffer, "TERMINATED: %s - %s",
                            cst.State.Terminated.Reason,
                            cst.State.Terminated.Message,
                        )
                    }
                    fmt.Fprintf(buffer, "\n")
                }
                to.Write(buffer.Bytes())
            case <-ctx.Done():
                return
            }
        }
    }()

    return nil
}
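Both entry points return immediately and stream in the background until their context is cancelled. A sketch wiring them together for one namespace (the logs/ directory prefix is illustrative):

package example

import (
    "context"
    "os"

    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework/podlogs"
)

// streamNamespaceLogs starts background log and event capture for one
// namespace and returns the cancel function that stops both.
func streamNamespaceLogs(cs clientset.Interface, ns string) (context.CancelFunc, error) {
    ctx, cancel := context.WithCancel(context.Background())
    // Write one file per container under ./logs/, report problems on stderr.
    if err := podlogs.CopyAllLogs(ctx, cs, ns, podlogs.LogOutput{
        StatusWriter:  os.Stderr,
        LogPathPrefix: "logs/",
    }); err != nil {
        cancel()
        return nil, err
    }
    if err := podlogs.WatchPods(ctx, cs, ns, os.Stderr); err != nil {
        cancel()
        return nil, err
    }
    return cancel, nil
}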
33
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go
generated
vendored
@ -30,6 +30,7 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/sysctl"

@ -77,6 +78,32 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
	return p
}

// CreateEventually retries pod creation for a while before failing
// the test with the most recent error. This mimics the behavior
// of a controller (like the one for DaemonSet) and is necessary
// because pod creation can fail while its service account is still
// being provisioned
// (https://github.com/kubernetes/kubernetes/issues/68776).
//
// Both the timeout and polling interval are configurable as optional
// arguments:
// - The first optional argument is the timeout.
// - The second optional argument is the polling interval.
//
// Both intervals can either be specified as time.Duration, parsable
// duration strings or as floats/integers. In the last case they are
// interpreted as seconds.
func (c *PodClient) CreateEventually(pod *v1.Pod, opts ...interface{}) *v1.Pod {
	c.mungeSpec(pod)
	var ret *v1.Pod
	Eventually(func() error {
		p, err := c.PodInterface.Create(pod)
		ret = p
		return err
	}, opts...).ShouldNot(HaveOccurred(), "Failed to create %q pod", pod.GetName())
	return ret
}

// CreateSyncInNamespace creates a new pod according to the framework specifications in the given namespace, and waits for it to start.
func (c *PodClient) CreateSyncInNamespace(pod *v1.Pod, namespace string) *v1.Pod {
	p := c.Create(pod)
@ -259,3 +286,9 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
	}
	return nil
}

func (c *PodClient) PodIsReady(name string) bool {
	pod, err := c.Get(name, metav1.GetOptions{})
	ExpectNoError(err)
	return podutil.IsPodReady(pod)
}
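As an illustration of the optional arguments described in the CreateEventually comment above, a sketch of a call from a test (assumes a *PodClient `c` obtained from the framework; the pod spec and interval values are arbitrary):

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "busybox-sleep"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "busybox",
				Image:   "busybox",
				Command: []string{"sleep", "3600"},
			}},
		},
	}
	// Retry creation for up to 2 minutes, polling every 5 seconds.
	// Plain integers in these positions would be interpreted as seconds.
	created := c.CreateEventually(pod, 2*time.Minute, 5*time.Second)
	Logf("created pod %s", created.Name)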
105
vendor/k8s.io/kubernetes/test/e2e/framework/profile_gatherer.go
generated
vendored
@ -61,25 +61,54 @@ func checkProfileGatheringPrerequisites() error {
	return nil
}

func gatherProfileOfKind(profileBaseName, kind string) error {
func getPortForComponent(componentName string) (int, error) {
	switch componentName {
	case "kube-apiserver":
		return 8080, nil
	case "kube-scheduler":
		return 10251, nil
	case "kube-controller-manager":
		return 10252, nil
	}
	return -1, fmt.Errorf("Port for component %v unknown", componentName)
}

// Gathers profiles from a master component through SSH. E.g. usages:
// - gatherProfile("kube-apiserver", "someTest", "heap")
// - gatherProfile("kube-scheduler", "someTest", "profile")
// - gatherProfile("kube-controller-manager", "someTest", "profile?seconds=20")
//
// We don't export this method, only wrappers around it (see below).
func gatherProfile(componentName, profileBaseName, profileKind string) error {
	if err := checkProfileGatheringPrerequisites(); err != nil {
		return fmt.Errorf("Profile gathering pre-requisite failed: %v", err)
	}
	profilePort, err := getPortForComponent(componentName)
	if err != nil {
		return fmt.Errorf("Profile gathering failed finding component port: %v", err)
	}
	if profileBaseName == "" {
		profileBaseName = time.Now().Format(time.RFC3339)
	}

	// Get the profile data over SSH.
	getCommand := fmt.Sprintf("curl -s localhost:8080/debug/pprof/%s", kind)
	getCommand := fmt.Sprintf("curl -s localhost:%v/debug/pprof/%s", profilePort, profileKind)
	sshResult, err := SSH(getCommand, GetMasterHost()+":22", TestContext.Provider)
	if err != nil {
		return fmt.Errorf("Failed to execute curl command on master through SSH: %v", err)
	}

	var profilePrefix string
	profilePrefix := componentName
	switch {
	case kind == "heap":
		profilePrefix = "ApiserverMemoryProfile_"
	case strings.HasPrefix(kind, "profile"):
		profilePrefix = "ApiserverCPUProfile_"
	case profileKind == "heap":
		profilePrefix += "_MemoryProfile_"
	case strings.HasPrefix(profileKind, "profile"):
		profilePrefix += "_CPUProfile_"
	default:
		return fmt.Errorf("Unknown profile kind provided: %s", kind)
		return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
	}

	// Write the data to a file.
	// Write the profile data to a file.
	rawprofilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pprof")
	rawprofile, err := os.Create(rawprofilePath)
	if err != nil {
@ -97,12 +126,12 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
	var cmd *exec.Cmd
	switch {
	// TODO: Support other profile kinds if needed (e.g. inuse_space, alloc_objects, mutex, etc.)
	case kind == "heap":
	case profileKind == "heap":
		cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", "--alloc_space", rawprofile.Name())
	case strings.HasPrefix(kind, "profile"):
	case strings.HasPrefix(profileKind, "profile"):
		cmd = exec.Command("go", "tool", "pprof", "-pdf", "-symbolize=none", rawprofile.Name())
	default:
		return fmt.Errorf("Unknown profile kind provided: %s", kind)
		return fmt.Errorf("Unknown profile kind provided: %s", profileKind)
	}
	outfilePath := path.Join(getProfilesDirectoryPath(), profilePrefix+profileBaseName+".pdf")
	outfile, err := os.Create(outfilePath)
@ -124,67 +153,53 @@ func gatherProfileOfKind(profileBaseName, kind string) error {
	// finish before the parent goroutine itself finishes, we accept a sync.WaitGroup
	// argument in these functions. Typically you would use the following pattern:
	//
	// func TestFooBar() {
	// func TestFoo() {
	//     var wg sync.WaitGroup
	//     wg.Add(3)
	//     go framework.GatherApiserverCPUProfile(&wg, "doing_foo")
	//     go framework.GatherApiserverMemoryProfile(&wg, "doing_foo")
	//     go framework.GatherCPUProfile("kube-apiserver", "before_foo", &wg)
	//     go framework.GatherMemoryProfile("kube-apiserver", "before_foo", &wg)
	//     <<<< some code doing foo >>>>>>
	//     go framework.GatherApiserverCPUProfile(&wg, "doing_bar")
	//     <<<< some code doing bar >>>>>>
	//     go framework.GatherCPUProfile("kube-scheduler", "after_foo", &wg)
	//     wg.Wait()
	// }
	//
	// If you do not wish to exercise the waiting logic, pass a nil value for the
	// waitgroup argument instead. However, then you would be responsible for ensuring
	// that the function finishes.
	// that the function finishes. There's also a polling-based gatherer utility for
	// CPU profiles available below.

func GatherApiserverCPUProfile(wg *sync.WaitGroup, profileBaseName string) {
	GatherApiserverCPUProfileForNSeconds(wg, profileBaseName, DefaultCPUProfileSeconds)
func GatherCPUProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
	GatherCPUProfileForSeconds(componentName, profileBaseName, DefaultCPUProfileSeconds, wg)
}

func GatherApiserverCPUProfileForNSeconds(wg *sync.WaitGroup, profileBaseName string, n int) {
func GatherCPUProfileForSeconds(componentName string, profileBaseName string, seconds int, wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	if err := checkProfileGatheringPrerequisites(); err != nil {
		Logf("Profile gathering pre-requisite failed: %v", err)
		return
	}
	if profileBaseName == "" {
		profileBaseName = time.Now().Format(time.RFC3339)
	}
	if err := gatherProfileOfKind(profileBaseName, fmt.Sprintf("profile?seconds=%v", n)); err != nil {
		Logf("Failed to gather apiserver CPU profile: %v", err)
	if err := gatherProfile(componentName, profileBaseName, fmt.Sprintf("profile?seconds=%v", seconds)); err != nil {
		Logf("Failed to gather %v CPU profile: %v", componentName, err)
	}
}

func GatherApiserverMemoryProfile(wg *sync.WaitGroup, profileBaseName string) {
func GatherMemoryProfile(componentName string, profileBaseName string, wg *sync.WaitGroup) {
	if wg != nil {
		defer wg.Done()
	}
	if err := checkProfileGatheringPrerequisites(); err != nil {
		Logf("Profile gathering pre-requisite failed: %v", err)
		return
	}
	if profileBaseName == "" {
		profileBaseName = time.Now().Format(time.RFC3339)
	}
	if err := gatherProfileOfKind(profileBaseName, "heap"); err != nil {
		Logf("Failed to gather apiserver memory profile: %v", err)
	if err := gatherProfile(componentName, profileBaseName, "heap"); err != nil {
		Logf("Failed to gather %v memory profile: %v", componentName, err)
	}
}

// StartApiserverCPUProfileGatherer is a polling-based gatherer of the apiserver's
// CPU profile. It takes the delay b/w consecutive gatherings as an argument and
// StartCPUProfileGatherer performs polling-based gathering of the component's CPU
// profile. It takes the interval b/w consecutive gatherings as an argument and
// starts the gathering goroutine. To stop the gatherer, close the returned channel.
func StartApiserverCPUProfileGatherer(delay time.Duration) chan struct{} {
func StartCPUProfileGatherer(componentName string, profileBaseName string, interval time.Duration) chan struct{} {
	stopCh := make(chan struct{})
	go func() {
		for {
			select {
			case <-time.After(delay):
				GatherApiserverCPUProfile(nil, "")
			case <-time.After(interval):
				GatherCPUProfile(componentName, profileBaseName+"_"+time.Now().Format(time.RFC3339), nil)
			case <-stopCh:
				return
			}
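To make the polling-based gatherer above concrete, a short sketch of starting and stopping it around a workload (component name, base name, and interval here are arbitrary examples):

	stopCh := StartCPUProfileGatherer("kube-apiserver", "load_test", 30*time.Second)
	// ... run the workload being profiled ...
	close(stopCh) // stops the gathering goroutine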
145
vendor/k8s.io/kubernetes/test/e2e/framework/provider.go
generated
vendored
Normal file
@ -0,0 +1,145 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"os"
	"sync"

	"github.com/pkg/errors"

	"k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

type Factory func() (ProviderInterface, error)

var (
	providers = make(map[string]Factory)
	mutex     sync.Mutex
)

// RegisterProvider is expected to be called during application init,
// typically by an init function in a provider package.
func RegisterProvider(name string, factory Factory) {
	mutex.Lock()
	defer mutex.Unlock()
	if _, ok := providers[name]; ok {
		panic(fmt.Sprintf("provider %s already registered", name))
	}
	providers[name] = factory
}

func init() {
	// "local" or "skeleton" can always be used.
	RegisterProvider("local", func() (ProviderInterface, error) {
		return NullProvider{}, nil
	})
	RegisterProvider("skeleton", func() (ProviderInterface, error) {
		return NullProvider{}, nil
	})
	// The empty string also works, but triggers a warning.
	RegisterProvider("", func() (ProviderInterface, error) {
		Logf("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
		return NullProvider{}, nil
	})
}

// SetupProviderConfig validates the chosen provider and creates
// an interface instance for it.
func SetupProviderConfig(providerName string) (ProviderInterface, error) {
	var err error

	mutex.Lock()
	defer mutex.Unlock()
	factory, ok := providers[providerName]
	if !ok {
		return nil, errors.Wrapf(os.ErrNotExist, "The provider %s is unknown.", providerName)
	}
	provider, err := factory()

	return provider, err
}

// ProviderInterface contains the implementation for certain
// provider-specific functionality.
type ProviderInterface interface {
	FrameworkBeforeEach(f *Framework)
	FrameworkAfterEach(f *Framework)

	ResizeGroup(group string, size int32) error
	GetGroupNodes(group string) ([]string, error)
	GroupSize(group string) (int, error)

	CreatePD(zone string) (string, error)
	DeletePD(pdName string) error
	CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error)
	DeletePVSource(pvSource *v1.PersistentVolumeSource) error

	CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string)

	EnsureLoadBalancerResourcesDeleted(ip, portRange string) error
	LoadBalancerSrcRanges() []string
	EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service))
}

// NullProvider is the default implementation of the ProviderInterface
// which doesn't do anything.
type NullProvider struct{}

func (n NullProvider) FrameworkBeforeEach(f *Framework) {}
func (n NullProvider) FrameworkAfterEach(f *Framework)  {}

func (n NullProvider) ResizeGroup(string, int32) error {
	return fmt.Errorf("provider does not support InstanceGroups")
}
func (n NullProvider) GetGroupNodes(group string) ([]string, error) {
	return nil, fmt.Errorf("provider does not support InstanceGroups")
}
func (n NullProvider) GroupSize(group string) (int, error) {
	return -1, fmt.Errorf("provider does not support InstanceGroups")
}

func (n NullProvider) CreatePD(zone string) (string, error) {
	return "", fmt.Errorf("provider does not support volume creation")
}
func (n NullProvider) DeletePD(pdName string) error {
	return fmt.Errorf("provider does not support volume deletion")
}
func (n NullProvider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return nil, fmt.Errorf("provider not supported")
}
func (n NullProvider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	return fmt.Errorf("provider not supported")
}

func (n NullProvider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
}

func (n NullProvider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	return nil
}
func (n NullProvider) LoadBalancerSrcRanges() []string {
	return nil
}
func (n NullProvider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
	nop := func(svc *v1.Service) {}
	return nop, nop
}

var _ ProviderInterface = NullProvider{}
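A sketch of the registration pattern this file enables, modeled on the provider packages that follow below (the package and provider names here are hypothetical):

	package myprovider

	import "k8s.io/kubernetes/test/e2e/framework"

	// Provider embeds NullProvider so every ProviderInterface method
	// has a no-op default; override only the methods that are needed.
	type Provider struct {
		framework.NullProvider
	}

	func init() {
		framework.RegisterProvider("myprovider", func() (framework.ProviderInterface, error) {
			return &Provider{}, nil
		})
	}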
32
vendor/k8s.io/kubernetes/test/e2e/framework/providers/aws/BUILD
generated
vendored
Normal file
@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["aws.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/providers/aws",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cloudprovider/providers/aws:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws/awserr:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
        "//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
129
vendor/k8s.io/kubernetes/test/e2e/framework/providers/aws/aws.go
generated
vendored
Normal file
@ -0,0 +1,129 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package aws

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	"github.com/aws/aws-sdk-go/service/ec2"

	"k8s.io/api/core/v1"
	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	framework.RegisterProvider("aws", NewProvider)
}

func NewProvider() (framework.ProviderInterface, error) {
	if framework.TestContext.CloudConfig.Zone == "" {
		return nil, fmt.Errorf("gce-zone must be specified for AWS")
	}
	return &Provider{}, nil
}

type Provider struct {
	framework.NullProvider
}

func (p *Provider) ResizeGroup(group string, size int32) error {
	client := autoscaling.New(session.New())
	return awscloud.ResizeInstanceGroup(client, group, int(size))
}

func (p *Provider) GroupSize(group string) (int, error) {
	client := autoscaling.New(session.New())
	instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
	if err != nil {
		return -1, fmt.Errorf("error describing instance group: %v", err)
	}
	if instanceGroup == nil {
		return -1, fmt.Errorf("instance group not found: %s", group)
	}
	return instanceGroup.CurrentSize()
}

func (p *Provider) CreatePD(zone string) (string, error) {
	client := newAWSClient(zone)
	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = aws.String(zone)
	request.Size = aws.Int64(10)
	request.VolumeType = aws.String(awscloud.DefaultVolumeType)
	response, err := client.CreateVolume(request)
	if err != nil {
		return "", err
	}

	az := aws.StringValue(response.AvailabilityZone)
	awsID := aws.StringValue(response.VolumeId)

	volumeName := "aws://" + az + "/" + awsID
	return volumeName, nil
}

func (p *Provider) DeletePD(pdName string) error {
	client := newAWSClient("")

	tokens := strings.Split(pdName, "/")
	awsVolumeID := tokens[len(tokens)-1]

	request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
	_, err := client.DeleteVolume(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
			framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
		} else {
			return fmt.Errorf("error deleting EBS volumes: %v", err)
		}
	}
	return nil
}

func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return &v1.PersistentVolumeSource{
		AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
			VolumeID: diskName,
			FSType:   "ext3",
		},
	}, nil
}

func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	return framework.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
}

func newAWSClient(zone string) *ec2.EC2 {
	var cfg *aws.Config

	if zone == "" {
		zone = framework.TestContext.CloudConfig.Zone
	}
	if zone == "" {
		framework.Logf("Warning: No AWS zone configured!")
		cfg = nil
	} else {
		region := zone[:len(zone)-1]
		cfg = &aws.Config{Region: aws.String(region)}
	}
	return ec2.New(session.New(), cfg)
}
28
vendor/k8s.io/kubernetes/test/e2e/framework/providers/azure/BUILD
generated
vendored
Normal file
@ -0,0 +1,28 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["azure.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/providers/azure",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cloudprovider/providers/azure:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//test/e2e/framework:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
80
vendor/k8s.io/kubernetes/test/e2e/framework/providers/azure/azure.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"os"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	framework.RegisterProvider("azure", NewProvider)
}

func NewProvider() (framework.ProviderInterface, error) {
	if framework.TestContext.CloudConfig.ConfigFile == "" {
		return nil, fmt.Errorf("config-file must be specified for Azure")
	}
	config, err := os.Open(framework.TestContext.CloudConfig.ConfigFile)
	if err != nil {
		framework.Logf("Couldn't open cloud provider configuration %s: %#v",
			framework.TestContext.CloudConfig.ConfigFile, err)
	}
	defer config.Close()
	azureCloud, err := azure.NewCloud(config)
	return &Provider{
		azureCloud: azureCloud.(*azure.Cloud),
	}, err
}

type Provider struct {
	framework.NullProvider

	azureCloud *azure.Cloud
}

func (p *Provider) CreatePD(zone string) (string, error) {
	pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))
	_, diskURI, _, err := p.azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
	if err != nil {
		return "", err
	}
	return diskURI, nil
}

func (p *Provider) DeletePD(pdName string) error {
	if err := p.azureCloud.DeleteVolume(pdName); err != nil {
		framework.Logf("failed to delete Azure volume %q: %v", pdName, err)
		return err
	}
	return nil
}

func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
	enable = func(svc *v1.Service) {
		svc.ObjectMeta.Annotations = map[string]string{azure.ServiceAnnotationLoadBalancerInternal: "true"}
	}
	disable = func(svc *v1.Service) {
		svc.ObjectMeta.Annotations = map[string]string{azure.ServiceAnnotationLoadBalancerInternal: "false"}
	}
	return
}
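For illustration, how a test might use the enable/disable hooks returned above (a sketch; `provider` stands for any ProviderInterface implementation such as this one):

	enable, disable := provider.EnableAndDisableInternalLB()
	svc := &v1.Service{}
	enable(svc)  // annotates the Service for an internal load balancer
	// ... exercise the internal load balancer ...
	disable(svc) // switches the annotation back to an external one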
48
vendor/k8s.io/kubernetes/test/e2e/framework/providers/gce/BUILD
generated
vendored
Normal file
@ -0,0 +1,48 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "firewall.go",
        "gce.go",
        "ingress.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/framework/providers/gce",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/cloudprovider/providers/gce:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/cloud-provider:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/google.golang.org/api/compute/v1:go_default_library",
        "//vendor/google.golang.org/api/googleapi:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["firewall_test.go"],
    embed = [":go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package gce

import (
	"fmt"
@ -23,15 +23,16 @@ import (
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/cloudprovider"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	cloudprovider "k8s.io/cloud-provider"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
)

const (
@ -51,10 +52,10 @@ func MakeFirewallNameForLBService(name string) string {
// ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
		Failf("can not construct firewall rule for non-loadbalancer type service")
		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
	}
	fw := compute.Firewall{}
	fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
	fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
	fw.TargetTags = []string{nodeTag}
	if svc.Spec.LoadBalancerSourceRanges == nil {
		fw.SourceRanges = []string{"0.0.0.0/0"}
@ -77,10 +78,10 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
// ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
		Failf("can not construct firewall rule for non-loadbalancer type service")
		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
	}
	fw := compute.Firewall{}
	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.GetLoadBalancerName(svc), isNodesHealthCheck)
	fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck)
	fw.TargetTags = []string{nodeTag}
	fw.SourceRanges = gcecloud.LoadBalancerSrcRanges()
	healthCheckPort := gcecloud.GetNodesHealthCheckPort()
@ -96,42 +97,6 @@ func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service,
	return &fw
}

// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
	res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
		instanceName).Do()
	if err != nil {
		Failf("Failed to get instance tags for %v: %v", instanceName, err)
	}
	return res.Tags
}

// SetInstanceTags sets tags on GCE instance with given name.
func SetInstanceTags(cloudConfig CloudConfig, instanceName, zone string, tags []string) []string {
	gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
	// Re-get the instance every time because we need the latest fingerprint for updating metadata
	resTags := GetInstanceTags(cloudConfig, instanceName)
	_, err := gceCloud.ComputeServices().GA.Instances.SetTags(
		cloudConfig.ProjectID, zone, instanceName,
		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
	if err != nil {
		Failf("failed to set instance tags: %v", err)
	}
	Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
	return resTags.Items
}

// GetNodeTags gets the k8s node tag from one of the nodes
func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) []string {
	nodes := GetReadySchedulableNodesOrDie(c)
	if len(nodes.Items) == 0 {
		Logf("GetNodeTags: Found 0 nodes.")
		return []string{}
	}
	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
}

// GetInstancePrefix returns the INSTANCE_PREFIX env we set for e2e cluster.
// From cluster/gce/config-test.sh, master name is set up using below format:
// MASTER_NAME="${INSTANCE_PREFIX}-master"
@ -436,8 +401,8 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
	return nil
}

func WaitForFirewallRule(gceCloud *gcecloud.GCECloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
	Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
	framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
	var fw *compute.Firewall
	var err error

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package framework
package gce

import "testing"
376
vendor/k8s.io/kubernetes/test/e2e/framework/providers/gce/gce.go
generated
vendored
Normal file
@ -0,0 +1,376 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce

import (
	"fmt"
	"net/http"
	"os/exec"
	"regexp"
	"strings"
	"time"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/test/e2e/framework"
)

func init() {
	framework.RegisterProvider("gce", factory)
	framework.RegisterProvider("gke", factory)
}

func factory() (framework.ProviderInterface, error) {
	framework.Logf("Fetching cloud provider for %q\r", framework.TestContext.Provider)
	zone := framework.TestContext.CloudConfig.Zone
	region := framework.TestContext.CloudConfig.Region

	var err error
	if region == "" {
		region, err = gcecloud.GetGCERegion(zone)
		if err != nil {
			return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
		}
	}
	managedZones := []string{} // Manage all zones in the region
	if !framework.TestContext.CloudConfig.MultiZone {
		managedZones = []string{zone}
	}

	gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
		APIEndpoint:        framework.TestContext.CloudConfig.ApiEndpoint,
		ProjectID:          framework.TestContext.CloudConfig.ProjectID,
		Region:             region,
		Zone:               zone,
		ManagedZones:       managedZones,
		NetworkName:        "", // TODO: Change this to use framework.TestContext.CloudConfig.Network?
		SubnetworkName:     "",
		NodeTags:           nil,
		NodeInstancePrefix: "",
		TokenSource:        nil,
		UseMetadataServer:  false,
		AlphaFeatureGate:   gcecloud.NewAlphaFeatureGate([]string{}),
	})

	if err != nil {
		return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err)
	}

	// Arbitrarily pick one of the zones we have nodes in
	if framework.TestContext.CloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone {
		zones, err := gceCloud.GetAllZonesFromCloudProvider()
		if err != nil {
			return nil, err
		}

		framework.TestContext.CloudConfig.Zone, _ = zones.PopAny()
	}

	return NewProvider(gceCloud), nil
}

func NewProvider(gceCloud *gcecloud.Cloud) framework.ProviderInterface {
	return &Provider{
		gceCloud: gceCloud,
	}
}

type Provider struct {
	framework.NullProvider
	gceCloud *gcecloud.Cloud
}

func (p *Provider) ResizeGroup(group string, size int32) error {
	// TODO: make this hit the compute API directly instead of shelling out to gcloud.
	// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
	zone, err := getGCEZoneForGroup(group)
	if err != nil {
		return err
	}
	output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
		group, fmt.Sprintf("--size=%v", size),
		"--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput()
	if err != nil {
		return fmt.Errorf("Failed to resize node instance group %s: %s", group, output)
	}
	return nil
}

func (p *Provider) GetGroupNodes(group string) ([]string, error) {
	// TODO: make this hit the compute API directly instead of shelling out to gcloud.
	// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
	zone, err := getGCEZoneForGroup(group)
	if err != nil {
		return nil, err
	}
	output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
		"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
		"--zone="+zone).CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output)
	}
	re := regexp.MustCompile(".*RUNNING")
	lines := re.FindAllString(string(output), -1)
	for i, line := range lines {
		lines[i] = line[:strings.Index(line, " ")]
	}
	return lines, nil
}

func (p *Provider) GroupSize(group string) (int, error) {
	// TODO: make this hit the compute API directly instead of shelling out to gcloud.
	// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
	zone, err := getGCEZoneForGroup(group)
	if err != nil {
		return -1, err
	}
	output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
		"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
		"--zone="+zone).CombinedOutput()
	if err != nil {
		return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output)
	}
	re := regexp.MustCompile("RUNNING")
	return len(re.FindAllString(string(output), -1)), nil
}

func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
	project := framework.TestContext.CloudConfig.ProjectID
	region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
	if err != nil {
		return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err)
	}

	return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
		service := p.gceCloud.ComputeServices().GA
		list, err := service.ForwardingRules.List(project, region).Do()
		if err != nil {
			return false, err
		}
		for _, item := range list.Items {
			if item.PortRange == portRange && item.IPAddress == ip {
				framework.Logf("found a load balancer: %v", item)
				return false, nil
			}
		}
		return true, nil
	})
}

func getGCEZoneForGroup(group string) (string, error) {
	zone := framework.TestContext.CloudConfig.Zone
	if framework.TestContext.CloudConfig.MultiZone {
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list",
			"--project="+framework.TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput()
		if err != nil {
			return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output)
		}
		zone = strings.TrimSpace(string(output))
	}
	return zone, nil
}

func (p *Provider) CreatePD(zone string) (string, error) {
	pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))

	if zone == "" && framework.TestContext.CloudConfig.MultiZone {
		zones, err := p.gceCloud.GetAllZonesFromCloudProvider()
		if err != nil {
			return "", err
		}
		zone, _ = zones.PopAny()
	}

	tags := map[string]string{}
	if err := p.gceCloud.CreateDisk(pdName, gcecloud.DiskTypeStandard, zone, 2 /* sizeGb */, tags); err != nil {
		return "", err
	}
	return pdName, nil
}

func (p *Provider) DeletePD(pdName string) error {
	err := p.gceCloud.DeleteDisk(pdName)

	if err != nil {
		if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
			// The PD doesn't exist anymore, ignore error.
			return nil
		}

		framework.Logf("error deleting PD %q: %v", pdName, err)
	}
	return err
}

func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
	return &v1.PersistentVolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
			PDName:   diskName,
			FSType:   "ext3",
			ReadOnly: false,
		},
	}, nil
}

func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	return framework.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
}

// CleanupServiceResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alphanumeric character ('a') to work around cloudprovider rules.
func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
	if pollErr := wait.Poll(5*time.Second, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
		if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
			framework.Logf("Still waiting for glbc to cleanup: %v", err)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		framework.Failf("Failed to cleanup service GCE resources.")
	}
}

func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
	if region == "" {
		// Attempt to parse region from zone if no region is given.
		var err error
		region, err = gcecloud.GetGCERegion(zone)
		if err != nil {
			return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
		}
	}
	if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = err
	}
	if err := p.gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
	}
	if err := p.gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
	}
	clusterID, err := GetClusterID(c)
	if err != nil {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
		return
	}
	hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
	hc, getErr := p.gceCloud.GetHTTPHealthCheck(loadBalancerName)
	if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, getErr)
		return
	}
	if hc != nil {
		hcNames = append(hcNames, hc.Name)
	}
	if err := p.gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
		!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
		retErr = fmt.Errorf("%v\n%v", retErr, err)
	}
	return
}

func (p *Provider) LoadBalancerSrcRanges() []string {
	return gcecloud.LoadBalancerSrcRanges()
}

func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
	enable = func(svc *v1.Service) {
		svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)}
	}
	disable = func(svc *v1.Service) {
		delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType)
	}
	return
}

// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *compute.Tags {
	gceCloud := cloudConfig.Provider.(*Provider).gceCloud
	res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
		instanceName).Do()
	if err != nil {
		framework.Failf("Failed to get instance tags for %v: %v", instanceName, err)
	}
	return res.Tags
}

// SetInstanceTags sets tags on GCE instance with given name.
func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone string, tags []string) []string {
	gceCloud := cloudConfig.Provider.(*Provider).gceCloud
	// Re-get the instance every time because we need the latest fingerprint for updating metadata
	resTags := GetInstanceTags(cloudConfig, instanceName)
	_, err := gceCloud.ComputeServices().GA.Instances.SetTags(
		cloudConfig.ProjectID, zone, instanceName,
		&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
	if err != nil {
		framework.Failf("failed to set instance tags: %v", err)
	}
	framework.Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
	return resTags.Items
}

// GetNodeTags gets the k8s node tag from one of the nodes
func GetNodeTags(c clientset.Interface, cloudConfig framework.CloudConfig) []string {
	nodes := framework.GetReadySchedulableNodesOrDie(c)
	if len(nodes.Items) == 0 {
		framework.Logf("GetNodeTags: Found 0 nodes.")
		return []string{}
	}
	return GetInstanceTags(cloudConfig, nodes.Items[0].Name).Items
}

// IsGoogleAPIHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == code
}

func GetGCECloud() (*gcecloud.Cloud, error) {
	p, ok := framework.TestContext.CloudConfig.Provider.(*Provider)
	if !ok {
		return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCE provider: %#v", framework.TestContext.CloudConfig.Provider)
	}
	return p.gceCloud, nil
}

func GetClusterID(c clientset.Interface) (string, error) {
	cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
	if err != nil || cm == nil {
		return "", fmt.Errorf("error getting cluster ID: %v", err)
	}
	clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
	providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
	if !clusterIDExists {
		return "", fmt.Errorf("cluster ID not set")
	}
	if providerIDExists {
		return providerID, nil
	}
	return clusterID, nil
}
817
vendor/k8s.io/kubernetes/test/e2e/framework/providers/gce/ingress.go
generated
vendored
Normal file
@ -0,0 +1,817 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gce

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"net/http"
	"os/exec"
	"strconv"
	"strings"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	utilexec "k8s.io/utils/exec"
)

const (
	// Name of the config-map and key the ingress controller stores its uid in.
	uidConfigMap = "ingress-uid"
	uidKey       = "uid"

	// All cloud resources created by the ingress controller start with this
	// prefix.
	k8sPrefix = "k8s-"

	// clusterDelimiter is the delimiter used by the ingress controller
	// to split uid from other naming/metadata.
	clusterDelimiter = "--"

	// Cloud resources created by the ingress controller older than this
	// are automatically purged to prevent running out of quota.
	// TODO(37335): write soak tests and bump this up to a week.
	maxAge = 48 * time.Hour

	// GCE only allows names < 64 characters, and the loadbalancer controller inserts
	// a single character of padding.
	nameLenLimit = 62
)

// GCEIngressController manages implementation details of Ingress on GCE/GKE.
type GCEIngressController struct {
	Ns           string
	rcPath       string
	UID          string
	staticIPName string
	rc           *v1.ReplicationController
	svc          *v1.Service
	Client       clientset.Interface
	Cloud        framework.CloudConfig
}

func (cont *GCEIngressController) CleanupGCEIngressController() error {
	return cont.CleanupGCEIngressControllerWithTimeout(framework.LoadBalancerCleanupTimeout)
}

// CleanupGCEIngressControllerWithTimeout calls the GCEIngressController.Cleanup(false)
// followed with deleting the static ip, and then a final GCEIngressController.Cleanup(true)
func (cont *GCEIngressController) CleanupGCEIngressControllerWithTimeout(timeout time.Duration) error {
	pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
		if err := cont.Cleanup(false); err != nil {
			framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err)
			return false, nil
		}
		return true, nil
	})

	// Always try to cleanup even if pollErr == nil, because the cleanup
	// routine also purges old leaked resources based on creation timestamp.
	By("Performing final delete of any remaining resources")
	if cleanupErr := cont.Cleanup(true); cleanupErr != nil {
		By(fmt.Sprintf("WARNING: possibly leaked resources: %v\n", cleanupErr))
	} else {
		By("No resources leaked.")
	}

	// The static IP is allocated on behalf of the test and is never deleted by the
	// controller. Delete this IP only after the controller has had a chance
	// to cleanup or it might interfere with the controller, causing it to
	// throw out confusing events.
	if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
		if err := cont.deleteStaticIPs(); err != nil {
			framework.Logf("Failed to delete static-ip: %v\n", err)
			return false, nil
		}
		return true, nil
	}); ipErr != nil {
		// If this is a persistent error, the suite will fail when we run out
		// of quota anyway.
		By(fmt.Sprintf("WARNING: possibly leaked static IP: %v\n", ipErr))
	}

	// Log that the GLBC failed to cleanup GCE resources on ingress deletion.
	// See kubernetes/ingress#431
	if pollErr != nil {
		return fmt.Errorf("error: L7 controller failed to delete all cloud resources on time. %v", pollErr)
	}
	return nil
}

func (cont *GCEIngressController) getL7AddonUID() (string, error) {
	framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
	cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	if uid, ok := cm.Data[uidKey]; ok {
		return uid, nil
	}
	return "", fmt.Errorf("Could not find cluster UID for L7 addon pod")
}

func (cont *GCEIngressController) ListGlobalForwardingRules() []*compute.ForwardingRule {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	fwdList := []*compute.ForwardingRule{}
	l, err := gceCloud.ListGlobalForwardingRules()
	Expect(err).NotTo(HaveOccurred())
	for _, fwd := range l {
		if cont.isOwned(fwd.Name) {
			fwdList = append(fwdList, fwd)
		}
	}
	return fwdList
}

func (cont *GCEIngressController) deleteForwardingRule(del bool) string {
	msg := ""
	fwList := []compute.ForwardingRule{}
	for _, regex := range []string{fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter), fmt.Sprintf("%vfws-.*%v.*", k8sPrefix, clusterDelimiter)} {
		gcloudComputeResourceList("forwarding-rules", regex, cont.Cloud.ProjectID, &fwList)
		if len(fwList) == 0 {
			continue
		}
		for _, f := range fwList {
			if !cont.canDelete(f.Name, f.CreationTimestamp, del) {
				continue
			}
			if del {
				GcloudComputeResourceDelete("forwarding-rules", f.Name, cont.Cloud.ProjectID, "--global")
			} else {
				msg += fmt.Sprintf("%v (forwarding rule)\n", f.Name)
			}
		}
	}
	return msg
}

func (cont *GCEIngressController) GetGlobalAddress(ipName string) *compute.Address {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	ip, err := gceCloud.GetGlobalAddress(ipName)
	Expect(err).NotTo(HaveOccurred())
	return ip
}

func (cont *GCEIngressController) deleteAddresses(del bool) string {
	msg := ""
	ipList := []compute.Address{}
	regex := fmt.Sprintf("%vfw-.*%v.*", k8sPrefix, clusterDelimiter)
	gcloudComputeResourceList("addresses", regex, cont.Cloud.ProjectID, &ipList)
	if len(ipList) != 0 {
		for _, ip := range ipList {
			if !cont.canDelete(ip.Name, ip.CreationTimestamp, del) {
				continue
			}
			if del {
				GcloudComputeResourceDelete("addresses", ip.Name, cont.Cloud.ProjectID, "--global")
			} else {
				msg += fmt.Sprintf("%v (static-ip)\n", ip.Name)
			}
		}
	}
	return msg
}

func (cont *GCEIngressController) ListTargetHttpProxies() []*compute.TargetHttpProxy {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	tpList := []*compute.TargetHttpProxy{}
	l, err := gceCloud.ListTargetHTTPProxies()
	Expect(err).NotTo(HaveOccurred())
	for _, tp := range l {
		if cont.isOwned(tp.Name) {
			tpList = append(tpList, tp)
		}
	}
	return tpList
}

func (cont *GCEIngressController) ListTargetHttpsProxies() []*compute.TargetHttpsProxy {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	tpsList := []*compute.TargetHttpsProxy{}
	l, err := gceCloud.ListTargetHTTPSProxies()
	Expect(err).NotTo(HaveOccurred())
	for _, tps := range l {
		if cont.isOwned(tps.Name) {
			tpsList = append(tpsList, tps)
		}
	}
	return tpsList
}

func (cont *GCEIngressController) deleteTargetProxy(del bool) string {
	msg := ""
	tpList := []compute.TargetHttpProxy{}
	regex := fmt.Sprintf("%vtp-.*%v.*", k8sPrefix, clusterDelimiter)
	gcloudComputeResourceList("target-http-proxies", regex, cont.Cloud.ProjectID, &tpList)
	if len(tpList) != 0 {
		for _, t := range tpList {
			if !cont.canDelete(t.Name, t.CreationTimestamp, del) {
				continue
			}
			if del {
				GcloudComputeResourceDelete("target-http-proxies", t.Name, cont.Cloud.ProjectID)
			} else {
				msg += fmt.Sprintf("%v (target-http-proxy)\n", t.Name)
			}
		}
	}
	tpsList := []compute.TargetHttpsProxy{}
	regex = fmt.Sprintf("%vtps-.*%v.*", k8sPrefix, clusterDelimiter)
	gcloudComputeResourceList("target-https-proxies", regex, cont.Cloud.ProjectID, &tpsList)
	if len(tpsList) != 0 {
		for _, t := range tpsList {
			if !cont.canDelete(t.Name, t.CreationTimestamp, del) {
				continue
			}
			if del {
				GcloudComputeResourceDelete("target-https-proxies", t.Name, cont.Cloud.ProjectID)
			} else {
				msg += fmt.Sprintf("%v (target-https-proxy)\n", t.Name)
			}
		}
	}
	return msg
}

func (cont *GCEIngressController) ListUrlMaps() []*compute.UrlMap {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	umList := []*compute.UrlMap{}
	l, err := gceCloud.ListURLMaps()
	Expect(err).NotTo(HaveOccurred())
	for _, um := range l {
		if cont.isOwned(um.Name) {
			umList = append(umList, um)
		}
	}
	return umList
}

func (cont *GCEIngressController) deleteURLMap(del bool) (msg string) {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	umList, err := gceCloud.ListURLMaps()
	if err != nil {
		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
			return msg
		}
		return fmt.Sprintf("Failed to list url maps: %v", err)
	}
	if len(umList) == 0 {
		return msg
	}
	for _, um := range umList {
		if !cont.canDelete(um.Name, um.CreationTimestamp, del) {
			continue
		}
		if del {
			framework.Logf("Deleting url-map: %s", um.Name)
			if err := gceCloud.DeleteURLMap(um.Name); err != nil &&
				!cont.isHTTPErrorCode(err, http.StatusNotFound) {
				msg += fmt.Sprintf("Failed to delete url map %v\n", um.Name)
			}
		} else {
			msg += fmt.Sprintf("%v (url-map)\n", um.Name)
		}
	}
	return msg
}

func (cont *GCEIngressController) ListGlobalBackendServices() []*compute.BackendService {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	beList := []*compute.BackendService{}
	l, err := gceCloud.ListGlobalBackendServices()
	Expect(err).NotTo(HaveOccurred())
	for _, be := range l {
		if cont.isOwned(be.Name) {
			beList = append(beList, be)
		}
	}
	return beList
}

func (cont *GCEIngressController) deleteBackendService(del bool) (msg string) {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	beList, err := gceCloud.ListGlobalBackendServices()
	if err != nil {
		if cont.isHTTPErrorCode(err, http.StatusNotFound) {
			return msg
		}
		return fmt.Sprintf("Failed to list backend services: %v", err)
	}
	if len(beList) == 0 {
		framework.Logf("No backend services found")
		return msg
	}
	for _, be := range beList {
		if !cont.canDelete(be.Name, be.CreationTimestamp, del) {
			continue
		}
		if del {
			framework.Logf("Deleting backend-service: %s", be.Name)
if err := gceCloud.DeleteGlobalBackendService(be.Name); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete backend service %v: %v\n", be.Name, err)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (backend-service)\n", be.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteHTTPHealthCheck(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
hcList, err := gceCloud.ListHTTPHealthChecks()
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("Failed to list HTTP health checks: %v", err)
|
||||
}
|
||||
if len(hcList) == 0 {
|
||||
return msg
|
||||
}
|
||||
for _, hc := range hcList {
|
||||
if !cont.canDelete(hc.Name, hc.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting http-health-check: %s", hc.Name)
|
||||
if err := gceCloud.DeleteHTTPHealthCheck(hc.Name); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete HTTP health check %v\n", hc.Name)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (http-health-check)\n", hc.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListSslCertificates() []*compute.SslCertificate {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
sslList := []*compute.SslCertificate{}
|
||||
l, err := gceCloud.ListSslCertificates()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, ssl := range l {
|
||||
if cont.isOwned(ssl.Name) {
|
||||
sslList = append(sslList, ssl)
|
||||
}
|
||||
}
|
||||
return sslList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteSSLCertificate(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
sslList, err := gceCloud.ListSslCertificates()
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("Failed to list ssl certificates: %v", err)
|
||||
}
|
||||
if len(sslList) != 0 {
|
||||
for _, s := range sslList {
|
||||
if !cont.canDelete(s.Name, s.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting ssl-certificate: %s", s.Name)
|
||||
if err := gceCloud.DeleteSslCertificate(s.Name); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete ssl certificates: %v\n", s.Name)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (ssl-certificate)\n", s.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) ListInstanceGroups() []*compute.InstanceGroup {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
igList := []*compute.InstanceGroup{}
|
||||
l, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
for _, ig := range l {
|
||||
if cont.isOwned(ig.Name) {
|
||||
igList = append(igList, ig)
|
||||
}
|
||||
}
|
||||
return igList
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteInstanceGroup(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
// TODO: E2E cloudprovider has only 1 zone, but the cluster can have many.
|
||||
// We need to poll on all IGs across all zones.
|
||||
igList, err := gceCloud.ListInstanceGroups(cont.Cloud.Zone)
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
return fmt.Sprintf("Failed to list instance groups: %v", err)
|
||||
}
|
||||
if len(igList) == 0 {
|
||||
return msg
|
||||
}
|
||||
for _, ig := range igList {
|
||||
if !cont.canDelete(ig.Name, ig.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting instance-group: %s", ig.Name)
|
||||
if err := gceCloud.DeleteInstanceGroup(ig.Name, cont.Cloud.Zone); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete instance group %v\n", ig.Name)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (instance-group)\n", ig.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (cont *GCEIngressController) deleteNetworkEndpointGroup(del bool) (msg string) {
|
||||
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
|
||||
// TODO: E2E cloudprovider has only 1 zone, but the cluster can have many.
|
||||
// We need to poll on all NEGs across all zones.
|
||||
negList, err := gceCloud.ListNetworkEndpointGroup(cont.Cloud.Zone)
|
||||
if err != nil {
|
||||
if cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
return msg
|
||||
}
|
||||
// Do not return error as NEG is still alpha.
|
||||
framework.Logf("Failed to list network endpoint group: %v", err)
|
||||
return msg
|
||||
}
|
||||
if len(negList) == 0 {
|
||||
return msg
|
||||
}
|
||||
for _, neg := range negList {
|
||||
if !cont.canDeleteNEG(neg.Name, neg.CreationTimestamp, del) {
|
||||
continue
|
||||
}
|
||||
if del {
|
||||
framework.Logf("Deleting network-endpoint-group: %s", neg.Name)
|
||||
if err := gceCloud.DeleteNetworkEndpointGroup(neg.Name, cont.Cloud.Zone); err != nil &&
|
||||
!cont.isHTTPErrorCode(err, http.StatusNotFound) {
|
||||
msg += fmt.Sprintf("Failed to delete network endpoint group %v\n", neg.Name)
|
||||
}
|
||||
} else {
|
||||
msg += fmt.Sprintf("%v (network-endpoint-group)\n", neg.Name)
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// canDelete returns true if either the name ends in a suffix matching this
|
||||
// controller's UID, or the creationTimestamp exceeds the maxAge and del is set
|
||||
// to true. Always returns false if the name doesn't match that we expect for
|
||||
// Ingress cloud resources.
|
||||
func (cont *GCEIngressController) canDelete(resourceName, creationTimestamp string, delOldResources bool) bool {
|
||||
// ignore everything not created by an ingress controller.
|
||||
splitName := strings.Split(resourceName, clusterDelimiter)
|
||||
if !strings.HasPrefix(resourceName, k8sPrefix) || len(splitName) != 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Resources created by the GLBC have a "0"" appended to the end if truncation
|
||||
// occurred. Removing the zero allows the following match.
|
||||
truncatedClusterUID := splitName[1]
|
||||
if len(truncatedClusterUID) >= 1 && strings.HasSuffix(truncatedClusterUID, "0") {
|
||||
truncatedClusterUID = truncatedClusterUID[:len(truncatedClusterUID)-1]
|
||||
}
|
||||
|
||||
// always delete things that are created by the current ingress controller.
|
||||
// Because of resource name truncation, this looks for a common prefix
|
||||
if strings.HasPrefix(cont.UID, truncatedClusterUID) {
|
||||
return true
|
||||
}
|
||||
if !delOldResources {
|
||||
return false
|
||||
}
|
||||
return canDeleteWithTimestamp(resourceName, creationTimestamp)
|
||||
}
|
||||
|
||||
// isOwned returns true if the resourceName ends in a suffix matching this
|
||||
// controller UID.
|
||||
func (cont *GCEIngressController) isOwned(resourceName string) bool {
|
||||
return cont.canDelete(resourceName, "", false)
|
||||
}
|
||||
|
||||
// canDeleteNEG returns true if either the name contains this controller's UID,
|
||||
// or the creationTimestamp exceeds the maxAge and del is set to true.
|
||||
func (cont *GCEIngressController) canDeleteNEG(resourceName, creationTimestamp string, delOldResources bool) bool {
|
||||
if !strings.HasPrefix(resourceName, "k8s") {
|
||||
return false
|
||||
}
|
||||
|
||||
if strings.Contains(resourceName, cont.UID) {
|
||||
return true
|
||||
}
|
||||
|
||||
if !delOldResources {
|
||||
return false
|
||||
}
|
||||
|
||||
return canDeleteWithTimestamp(resourceName, creationTimestamp)
|
||||
}
|
||||
|
||||
func canDeleteWithTimestamp(resourceName, creationTimestamp string) bool {
|
||||
createdTime, err := time.Parse(time.RFC3339, creationTimestamp)
|
||||
if err != nil {
|
||||
framework.Logf("WARNING: Failed to parse creation timestamp %v for %v: %v", creationTimestamp, resourceName, err)
|
||||
return false
|
||||
}
|
||||
if time.Since(createdTime) > maxAge {
|
||||
framework.Logf("%v created on %v IS too old", resourceName, creationTimestamp)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetFirewallRuleName returns the name of the firewall used for the GCEIngressController.
|
||||
func (cont *GCEIngressController) GetFirewallRuleName() string {
|
||||
return fmt.Sprintf("%vfw-l7%v%v", k8sPrefix, clusterDelimiter, cont.UID)
|
||||
}
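
A minimal sketch of the name this produces, assuming the package constants k8sPrefix = "k8s-" and clusterDelimiter = "--" (both defined elsewhere in this package, so these are assumed values):

	// For a controller with hypothetical UID "a1b2c3d4", GetFirewallRuleName yields:
	fmt.Println(fmt.Sprintf("%vfw-l7%v%v", "k8s-", "--", "a1b2c3d4")) // k8s-fw-l7--a1b2c3d4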

// GetFirewallRule returns the firewall used by the GCEIngressController.
// Causes a fatal error in case of an error.
// TODO: Rename this to GetFirewallRuleOrDie and similarly rename all other
// methods here to be consistent with the rest of the code in this repo.
func (cont *GCEIngressController) GetFirewallRule() *compute.Firewall {
	fw, err := cont.GetFirewallRuleOrError()
	Expect(err).NotTo(HaveOccurred())
	return fw
}

// GetFirewallRuleOrError returns the firewall used by the GCEIngressController.
// Returns an error if that fails.
// TODO: Rename this to GetFirewallRule when the above method with that name is renamed.
func (cont *GCEIngressController) GetFirewallRuleOrError() (*compute.Firewall, error) {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	fwName := cont.GetFirewallRuleName()
	return gceCloud.GetFirewall(fwName)
}

func (cont *GCEIngressController) deleteFirewallRule(del bool) (msg string) {
	fwList := []compute.Firewall{}
	regex := fmt.Sprintf("%vfw-l7%v.*", k8sPrefix, clusterDelimiter)
	gcloudComputeResourceList("firewall-rules", regex, cont.Cloud.ProjectID, &fwList)
	if len(fwList) != 0 {
		for _, f := range fwList {
			if !cont.canDelete(f.Name, f.CreationTimestamp, del) {
				continue
			}
			if del {
				GcloudComputeResourceDelete("firewall-rules", f.Name, cont.Cloud.ProjectID)
			} else {
				msg += fmt.Sprintf("%v (firewall rule)\n", f.Name)
			}
		}
	}
	return msg
}

func (cont *GCEIngressController) isHTTPErrorCode(err error, code int) bool {
	apiErr, ok := err.(*googleapi.Error)
	return ok && apiErr.Code == code
}

// BackendServiceUsingNEG returns true only if all global backend services with
// matching nodeports point to NEGs as backends.
func (cont *GCEIngressController) BackendServiceUsingNEG(svcPorts map[string]v1.ServicePort) (bool, error) {
	return cont.backendMode(svcPorts, "networkEndpointGroups")
}

// BackendServiceUsingIG returns true only if all global backend services with
// matching svcPorts point to IGs as backends.
func (cont *GCEIngressController) BackendServiceUsingIG(svcPorts map[string]v1.ServicePort) (bool, error) {
	return cont.backendMode(svcPorts, "instanceGroups")
}

func (cont *GCEIngressController) backendMode(svcPorts map[string]v1.ServicePort, keyword string) (bool, error) {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	beList, err := gceCloud.ListGlobalBackendServices()
	if err != nil {
		return false, fmt.Errorf("failed to list backend services: %v", err)
	}

	hcList, err := gceCloud.ListHealthChecks()
	if err != nil {
		return false, fmt.Errorf("failed to list health checks: %v", err)
	}

	uid := cont.UID
	if len(uid) > 8 {
		uid = uid[:8]
	}

	matchingBackendService := 0
	for svcName, sp := range svcPorts {
		match := false
		bsMatch := &compute.BackendService{}
		// Non-NEG BackendServices are named with the Nodeport in the name.
		// NEG BackendServices' names contain a sha256 hash of a string.
		negString := strings.Join([]string{uid, cont.Ns, svcName, fmt.Sprintf("%v", sp.Port)}, ";")
		negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
		for _, bs := range beList {
			if strings.Contains(bs.Name, strconv.Itoa(int(sp.NodePort))) ||
				strings.Contains(bs.Name, negHash) {
				match = true
				bsMatch = bs
				matchingBackendService++
				break
			}
		}

		if match {
			for _, be := range bsMatch.Backends {
				if !strings.Contains(be.Group, keyword) {
					return false, nil
				}
			}

			// Check that the correct HealthCheck exists for the BackendService.
			hcMatch := false
			for _, hc := range hcList {
				if hc.Name == bsMatch.Name {
					hcMatch = true
					break
				}
			}

			if !hcMatch {
				return false, fmt.Errorf("missing healthcheck for backendservice: %v", bsMatch.Name)
			}
		}
	}
	return matchingBackendService == len(svcPorts), nil
}
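
A self-contained sketch of the NEG hash computed in backendMode above, using hypothetical values for the UID, namespace, service, and port:

	package main

	import (
		"crypto/sha256"
		"fmt"
		"strings"
	)

	func main() {
		// Mirrors the negString/negHash computation in backendMode.
		negString := strings.Join([]string{"a1b2c3d4", "e2e-tests", "my-svc", "80"}, ";")
		negHash := fmt.Sprintf("%x", sha256.Sum256([]byte(negString)))[:8]
		// A NEG-backed BackendService name is expected to contain this hash.
		fmt.Println(negHash)
	}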

// Cleanup cleans up cloud resources.
// If del is false, it simply reports existing resources without deleting them.
// If del is true, it deletes resources it finds acceptable (see canDelete func).
func (cont *GCEIngressController) Cleanup(del bool) error {
	// Ordering is important here because we cannot delete resources that other
	// resources hold references to.
	errMsg := cont.deleteForwardingRule(del)
	// Static IPs are named after forwarding rules.
	errMsg += cont.deleteAddresses(del)

	errMsg += cont.deleteTargetProxy(del)
	errMsg += cont.deleteURLMap(del)
	errMsg += cont.deleteBackendService(del)
	errMsg += cont.deleteHTTPHealthCheck(del)

	errMsg += cont.deleteInstanceGroup(del)
	errMsg += cont.deleteNetworkEndpointGroup(del)
	errMsg += cont.deleteFirewallRule(del)
	errMsg += cont.deleteSSLCertificate(del)

	// TODO: Verify instance-groups, issue #16636. Gcloud mysteriously barfs when told
	// to unmarshal instance groups into the current vendored gce-client's understanding
	// of the struct.
	if errMsg == "" {
		return nil
	}
	return fmt.Errorf(errMsg)
}
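
A typical call pattern, sketched here rather than taken from the vendored tests: pass false first to audit leftover resources, then true to delete them.

	// cont is an initialized GCEIngressController.
	if err := cont.Cleanup(false); err != nil {
		framework.Logf("Leaked resources:\n%v", err)
		if err := cont.Cleanup(true); err != nil {
			framework.Logf("Cleanup failed: %v", err)
		}
	}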

// Init initializes the GCEIngressController with a UID.
func (cont *GCEIngressController) Init() error {
	uid, err := cont.getL7AddonUID()
	if err != nil {
		return err
	}
	cont.UID = uid
	// There's a name limit imposed by GCE. The controller will truncate.
	testName := fmt.Sprintf("k8s-fw-foo-app-X-%v--%v", cont.Ns, cont.UID)
	if len(testName) > nameLenLimit {
		framework.Logf("WARNING: test name including cluster UID: %v is over the GCE limit of %v", testName, nameLenLimit)
	} else {
		framework.Logf("Detected cluster UID %v", cont.UID)
	}
	return nil
}

// CreateStaticIP allocates a random static ip with the given name. Returns a string
// representation of the ip. Caller is expected to manage cleanup of the ip by
// invoking deleteStaticIPs.
func (cont *GCEIngressController) CreateStaticIP(name string) string {
	gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
	addr := &compute.Address{Name: name}
	if err := gceCloud.ReserveGlobalAddress(addr); err != nil {
		if delErr := gceCloud.DeleteGlobalAddress(name); delErr != nil {
			if cont.isHTTPErrorCode(delErr, http.StatusNotFound) {
				framework.Logf("Static ip with name %v was not allocated, nothing to delete", name)
			} else {
				framework.Logf("Failed to delete static ip %v: %v", name, delErr)
			}
		}
		framework.Failf("Failed to allocate static ip %v: %v", name, err)
	}

	ip, err := gceCloud.GetGlobalAddress(name)
	if err != nil {
		framework.Failf("Failed to get newly created static ip %v: %v", name, err)
	}

	cont.staticIPName = ip.Name
	framework.Logf("Reserved static ip %v: %v", cont.staticIPName, ip.Address)
	return ip.Address
}
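
Per the doc comment, callers pair CreateStaticIP with deleteStaticIPs; a sketch of that pairing (the name "e2e-static-ip" is hypothetical):

	ip := cont.CreateStaticIP("e2e-static-ip")
	defer func() {
		if err := cont.deleteStaticIPs(); err != nil {
			framework.Logf("Failed to release static ip %v: %v", ip, err)
		}
	}()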

// deleteStaticIPs deletes all static-ips allocated through calls to
// CreateStaticIP.
func (cont *GCEIngressController) deleteStaticIPs() error {
	if cont.staticIPName != "" {
		if err := GcloudComputeResourceDelete("addresses", cont.staticIPName, cont.Cloud.ProjectID, "--global"); err == nil {
			cont.staticIPName = ""
		} else {
			return err
		}
	} else {
		e2eIPs := []compute.Address{}
		gcloudComputeResourceList("addresses", "e2e-.*", cont.Cloud.ProjectID, &e2eIPs)
		ips := []string{}
		for _, ip := range e2eIPs {
			ips = append(ips, ip.Name)
		}
		framework.Logf("None of the remaining %d static-ips were created by this e2e: %v", len(ips), strings.Join(ips, ", "))
	}
	return nil
}

// gcloudComputeResourceList unmarshals json output of gcloud into given out interface.
func gcloudComputeResourceList(resource, regex, project string, out interface{}) {
	// gcloud prints a message to stderr if it has an available update
	// so we only look at stdout.
	command := []string{
		"compute", resource, "list",
		fmt.Sprintf("--filter='name ~ \"%q\"'", regex),
		fmt.Sprintf("--project=%v", project),
		"-q", "--format=json",
	}
	output, err := exec.Command("gcloud", command...).Output()
	if err != nil {
		errCode := -1
		errMsg := ""
		if exitErr, ok := err.(utilexec.ExitError); ok {
			errCode = exitErr.ExitStatus()
			errMsg = exitErr.Error()
			if osExitErr, ok := err.(*exec.ExitError); ok {
				errMsg = fmt.Sprintf("%v, stderr %v", errMsg, string(osExitErr.Stderr))
			}
		}
		framework.Logf("Error running gcloud command 'gcloud %s': err: %v, output: %v, status: %d, msg: %v", strings.Join(command, " "), err, string(output), errCode, errMsg)
	}
	if err := json.Unmarshal([]byte(output), out); err != nil {
		framework.Logf("Error unmarshalling gcloud output for %v: %v, output: %v", resource, err, string(output))
	}
}
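
Usage follows the pattern seen in deleteForwardingRule and deleteAddresses above: pass a pointer to a slice of the matching compute type and let the helper fill it from gcloud's JSON output (the project name here is hypothetical):

	rules := []compute.ForwardingRule{}
	gcloudComputeResourceList("forwarding-rules", "k8s-fw-.*--.*", "my-project", &rules)
	for _, r := range rules {
		framework.Logf("found %v (created %v)", r.Name, r.CreationTimestamp)
	}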

// GcloudComputeResourceDelete deletes the specified compute resource by name and project.
func GcloudComputeResourceDelete(resource, name, project string, args ...string) error {
	framework.Logf("Deleting %v: %v", resource, name)
	argList := append([]string{"compute", resource, "delete", name, fmt.Sprintf("--project=%v", project), "-q"}, args...)
	output, err := exec.Command("gcloud", argList...).CombinedOutput()
	if err != nil {
		framework.Logf("Error deleting %v, output: %v\nerror: %+v", resource, string(output), err)
	}
	return err
}

// GcloudComputeResourceCreate creates a compute resource with a name and arguments.
func GcloudComputeResourceCreate(resource, name, project string, args ...string) error {
	framework.Logf("Creating %v in project %v: %v", resource, project, name)
	argsList := append([]string{"compute", resource, "create", name, fmt.Sprintf("--project=%v", project)}, args...)
	framework.Logf("Running command: gcloud %+v", strings.Join(argsList, " "))
	output, err := exec.Command("gcloud", argsList...).CombinedOutput()
	if err != nil {
		framework.Logf("Error creating %v, output: %v\nerror: %+v", resource, string(output), err)
	}
	return err
}
30
vendor/k8s.io/kubernetes/test/e2e/framework/providers/kubemark/BUILD
generated
vendored
Normal file
@ -0,0 +1,30 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["kubemark.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/providers/kubemark",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/kubemark:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
91
vendor/k8s.io/kubernetes/test/e2e/framework/providers/kubemark/kubemark.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubemark

import (
	"flag"
	"fmt"

	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/kubemark"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/gomega"
)

var (
	kubemarkExternalKubeConfig = flag.String(fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
)

func init() {
	framework.RegisterProvider("kubemark", NewProvider)
}

func NewProvider() (framework.ProviderInterface, error) {
	// Actual initialization happens when the e2e framework gets constructed.
	return &Provider{}, nil
}

type Provider struct {
	framework.NullProvider
	controller   *kubemark.KubemarkController
	closeChannel chan struct{}
}

func (p *Provider) ResizeGroup(group string, size int32) error {
	return p.controller.SetNodeGroupSize(group, int(size))
}

func (p *Provider) GetGroupNodes(group string) ([]string, error) {
	return p.controller.GetNodeNamesForNodeGroup(group)
}

func (p *Provider) FrameworkBeforeEach(f *framework.Framework) {
	if *kubemarkExternalKubeConfig != "" && p.controller == nil {
		externalConfig, err := clientcmd.BuildConfigFromFlags("", *kubemarkExternalKubeConfig)
		// Check the error before dereferencing the config to avoid a nil pointer panic.
		Expect(err).NotTo(HaveOccurred())
		externalConfig.QPS = f.Options.ClientQPS
		externalConfig.Burst = f.Options.ClientBurst
		externalClient, err := clientset.NewForConfig(externalConfig)
		Expect(err).NotTo(HaveOccurred())
		f.KubemarkExternalClusterClientSet = externalClient
		p.closeChannel = make(chan struct{})
		externalInformerFactory := informers.NewSharedInformerFactory(externalClient, 0)
		kubemarkInformerFactory := informers.NewSharedInformerFactory(f.ClientSet, 0)
		kubemarkNodeInformer := kubemarkInformerFactory.Core().V1().Nodes()
		go kubemarkNodeInformer.Informer().Run(p.closeChannel)
		p.controller, err = kubemark.NewKubemarkController(externalClient, externalInformerFactory, f.ClientSet, kubemarkNodeInformer)
		Expect(err).NotTo(HaveOccurred())
		externalInformerFactory.Start(p.closeChannel)
		Expect(p.controller.WaitForCacheSync(p.closeChannel)).To(BeTrue())
		go p.controller.Run(p.closeChannel)
	}
}

func (p *Provider) FrameworkAfterEach(f *framework.Framework) {
	if p.closeChannel != nil {
		close(p.closeChannel)
		p.controller = nil
		p.closeChannel = nil
	}
}

func (p *Provider) GroupSize(group string) (int, error) {
	return p.controller.GetNodeGroupSize(group)
}
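
Since clientcmd.RecommendedConfigPathFlag is "kubeconfig", the flag registered above resolves to --kubemark-external-kubeconfig, so a kubemark provider run would be invoked roughly as follows (binary name and path are illustrative):

	e2e.test --provider=kubemark --kubemark-external-kubeconfig=/path/to/external/kubeconfig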
38
vendor/k8s.io/kubernetes/test/e2e/framework/psp_util.go
generated
vendored
@ -21,7 +21,7 @@ import (
	"sync"

	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	policy "k8s.io/api/policy/v1beta1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -42,33 +42,33 @@ var (
)

// Creates a PodSecurityPolicy that allows everything.
func PrivilegedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
func PrivilegedPSP(name string) *policy.PodSecurityPolicy {
	allowPrivilegeEscalation := true
	return &extensionsv1beta1.PodSecurityPolicy{
	return &policy.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
		},
		Spec: extensionsv1beta1.PodSecurityPolicySpec{
		Spec: policy.PodSecurityPolicySpec{
			Privileged:               true,
			AllowPrivilegeEscalation: &allowPrivilegeEscalation,
			AllowedCapabilities:      []corev1.Capability{"*"},
			Volumes:                  []extensionsv1beta1.FSType{extensionsv1beta1.All},
			Volumes:                  []policy.FSType{policy.All},
			HostNetwork:              true,
			HostPorts:                []extensionsv1beta1.HostPortRange{{Min: 0, Max: 65535}},
			HostPorts:                []policy.HostPortRange{{Min: 0, Max: 65535}},
			HostIPC:                  true,
			HostPID:                  true,
			RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
				Rule: extensionsv1beta1.RunAsUserStrategyRunAsAny,
			RunAsUser: policy.RunAsUserStrategyOptions{
				Rule: policy.RunAsUserStrategyRunAsAny,
			},
			SELinux: extensionsv1beta1.SELinuxStrategyOptions{
				Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
			SELinux: policy.SELinuxStrategyOptions{
				Rule: policy.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
				Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
			SupplementalGroups: policy.SupplementalGroupsStrategyOptions{
				Rule: policy.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
				Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
			FSGroup: policy.FSGroupStrategyOptions{
				Rule: policy.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
			AllowedUnsafeSysctls:   []string{"*"},
@ -112,8 +112,10 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
	}

	psp := PrivilegedPSP(podSecurityPolicyPrivileged)
	psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
	ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
	psp, err = f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(psp)
	if !apierrs.IsAlreadyExists(err) {
		ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
	}

	if IsRBACEnabled(f) {
		// Create the Role to bind it to the namespace.
@ -126,7 +128,9 @@ func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
				Verbs: []string{"use"},
			}},
		})
		ExpectNoError(err, "Failed to create PSP role")
		if !apierrs.IsAlreadyExists(err) {
			ExpectNoError(err, "Failed to create PSP role")
		}
	}
})
203
vendor/k8s.io/kubernetes/test/e2e/framework/pv_util.go
generated
vendored
@ -18,16 +18,9 @@ package framework

import (
	"fmt"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/golang/glog"
	. "github.com/onsi/ginkgo"
	"google.golang.org/api/googleapi"
	"k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
@ -36,8 +29,6 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/pkg/volume/util"
	imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -48,6 +39,12 @@ const (
	VolumeSelectorKey = "e2e-pv-pool"
)

var (
	// Common selinux labels
	SELinuxLabel = &v1.SELinuxOptions{
		Level: "s0:c0,c1"}
)

// Map of all PVs used in the multi pv-pvc tests. The key is the PV's name, which is
// guaranteed to be unique. The value is {} (empty struct) since we're only interested
// in the PV's name and if it is present. We must always Get the pv object before
@ -501,22 +498,28 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
// Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
	const maxWait = 5 * time.Minute
	if pod == nil {
		return nil
	}
	Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
	err := c.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil)
	return DeletePodWithWaitByName(f, c, pod.GetName(), pod.GetNamespace())
}

// Deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(f *Framework, c clientset.Interface, podName, podNamespace string) error {
	const maxWait = 5 * time.Minute
	Logf("Deleting pod %q in namespace %q", podName, podNamespace)
	err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
	if err != nil {
		if apierrs.IsNotFound(err) {
			return nil // assume pod was already deleted
		}
		return fmt.Errorf("pod Delete API error: %v", err)
	}
	Logf("Wait up to %v for pod %q to be fully deleted", maxWait, pod.Name)
	err = f.WaitForPodNotFound(pod.Name, maxWait)
	Logf("Wait up to %v for pod %q to be fully deleted", maxWait, podName)
	err = f.WaitForPodNotFound(podName, maxWait)
	if err != nil {
		return fmt.Errorf("pod %q was not deleted: %v", pod.Name, err)
		return fmt.Errorf("pod %q was not deleted: %v", podName, err)
	}
	return nil
}
@ -545,19 +548,6 @@ func CreateWaitAndDeletePod(f *Framework, c clientset.Interface, ns string, pvc
	return // note: named return value
}

// Sanity check for GCE testing. Verify the persistent disk attached to the node.
func VerifyGCEDiskAttached(diskName string, nodeName types.NodeName) (bool, error) {
	gceCloud, err := GetGCECloud()
	if err != nil {
		return false, fmt.Errorf("GetGCECloud error: %v", err)
	}
	isAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
	if err != nil {
		return false, fmt.Errorf("cannot verify if GCE disk is attached: %v", err)
	}
	return isAttached, nil
}

// Return a pvckey struct.
func makePvcKey(ns, name string) types.NamespacedName {
	return types.NamespacedName{Namespace: ns, Name: name}
@ -678,131 +668,15 @@ func DeletePDWithRetry(diskName string) error {
	return fmt.Errorf("unable to delete PD %q: %v", diskName, err)
}

func newAWSClient(zone string) *ec2.EC2 {
	var cfg *aws.Config

	if zone == "" {
		zone = TestContext.CloudConfig.Zone
	}
	if zone == "" {
		glog.Warning("No AWS zone configured!")
		cfg = nil
	} else {
		region := zone[:len(zone)-1]
		cfg = &aws.Config{Region: aws.String(region)}
	}
	return ec2.New(session.New(), cfg)
}

func createPD(zone string) (string, error) {
	if zone == "" {
		zone = TestContext.CloudConfig.Zone
	}

	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))

		gceCloud, err := GetGCECloud()
		if err != nil {
			return "", err
		}

		if zone == "" && TestContext.CloudConfig.MultiZone {
			zones, err := gceCloud.GetAllZonesFromCloudProvider()
			if err != nil {
				return "", err
			}
			zone, _ = zones.PopAny()
		}

		tags := map[string]string{}
		err = gceCloud.CreateDisk(pdName, gcecloud.DiskTypeSSD, zone, 10 /* sizeGb */, tags)
		if err != nil {
			return "", err
		}
		return pdName, nil
	} else if TestContext.Provider == "aws" {
		client := newAWSClient(zone)
		request := &ec2.CreateVolumeInput{}
		request.AvailabilityZone = aws.String(zone)
		request.Size = aws.Int64(10)
		request.VolumeType = aws.String(awscloud.DefaultVolumeType)
		response, err := client.CreateVolume(request)
		if err != nil {
			return "", err
		}

		az := aws.StringValue(response.AvailabilityZone)
		awsID := aws.StringValue(response.VolumeId)

		volumeName := "aws://" + az + "/" + awsID
		return volumeName, nil
	} else if TestContext.Provider == "azure" {
		pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
		azureCloud, err := GetAzureCloud()

		if err != nil {
			return "", err
		}

		_, diskURI, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
		if err != nil {
			return "", err
		}
		return diskURI, nil
	} else {
		return "", fmt.Errorf("provider does not support volume creation")
	}
	return TestContext.CloudConfig.Provider.CreatePD(zone)
}

func deletePD(pdName string) error {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		gceCloud, err := GetGCECloud()
		if err != nil {
			return err
		}

		err = gceCloud.DeleteDisk(pdName)

		if err != nil {
			if gerr, ok := err.(*googleapi.Error); ok && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "notFound" {
				// PD already exists, ignore error.
				return nil
			}

			Logf("error deleting PD %q: %v", pdName, err)
		}
		return err
	} else if TestContext.Provider == "aws" {
		client := newAWSClient("")

		tokens := strings.Split(pdName, "/")
		awsVolumeID := tokens[len(tokens)-1]

		request := &ec2.DeleteVolumeInput{VolumeId: aws.String(awsVolumeID)}
		_, err := client.DeleteVolume(request)
		if err != nil {
			if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
				Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
			} else {
				return fmt.Errorf("error deleting EBS volumes: %v", err)
			}
		}
		return nil
	} else if TestContext.Provider == "azure" {
		azureCloud, err := GetAzureCloud()
		if err != nil {
			return err
		}
		err = azureCloud.DeleteVolume(pdName)
		if err != nil {
			Logf("failed to delete Azure volume %q: %v", pdName, err)
			return err
		}
		return nil
	} else {
		return fmt.Errorf("provider does not support volume deletion")
	}
	return TestContext.CloudConfig.Provider.DeletePD(pdName)
}

// Returns a pod definition based on the namespace. The pod references the PVC's
@ -1000,11 +874,20 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m

// create security pod with given claims
func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
	return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
}

// create security pod with given claims
func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
	pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
	// Setting nodeName
	pod.Spec.NodeName = nodeName

	pod, err := client.CoreV1().Pods(namespace).Create(pod)
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %v", err)
	}

	// Waiting for pod to be running
	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
	if err != nil {
@ -1071,33 +954,9 @@ func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
	if err != nil {
		return nil, err
	}

	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		return &v1.PersistentVolumeSource{
			GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
				PDName:   diskName,
				FSType:   "ext3",
				ReadOnly: false,
			},
		}, nil
	} else if TestContext.Provider == "aws" {
		return &v1.PersistentVolumeSource{
			AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
				VolumeID: diskName,
				FSType:   "ext3",
			},
		}, nil
	} else {
		return nil, fmt.Errorf("Provider not supported")
	}
	return TestContext.CloudConfig.Provider.CreatePVSource(zone, diskName)
}

func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		return DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
	} else if TestContext.Provider == "aws" {
		return DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
	} else {
		return fmt.Errorf("Provider not supported")
	}
	return TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
}
12
vendor/k8s.io/kubernetes/test/e2e/framework/rc_util.go
generated
vendored
@ -216,6 +216,14 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
	return nil
}

// trimDockerRegistry trims the "docker.io/library/" prefix from the beginning of imagename.
// A community docker install does not prefix image names with the registry, while other
// runtimes (or docker installed via the RHEL extras repo) may, so trim the prefix when present.
func trimDockerRegistry(imagename string) string {
	imagename = strings.Replace(imagename, "docker.io/", "", 1)
	return strings.Replace(imagename, "library/", "", 1)
}
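
A sketch of the resulting behavior (image names are illustrative):

	fmt.Println(trimDockerRegistry("docker.io/library/nginx:1.14")) // "nginx:1.14"
	fmt.Println(trimDockerRegistry("k8s.gcr.io/pause:3.1"))         // unchanged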

// validatorFn is the function which individual tests will implement.
// We may want it to return more than just an error, at some point.
type validatorFn func(c clientset.Interface, podID string) error
@ -227,6 +235,7 @@ type validatorFn func(c clientset.Interface, podID string) error
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
	containerImage = trimDockerRegistry(containerImage)
	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
	// NB: kubectl adds the "exists" function to the standard template functions.
	// This lets us check to see if the "running" entry exists for each of the containers
@ -238,7 +247,7 @@ func ValidateController(c clientset.Interface, containerImage string, replicas i
	// You can read about the syntax here: http://golang.org/pkg/text/template/.
	getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)

	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)

	By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
@ -258,6 +267,7 @@ waitLoop:
		}

		currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
		currentImage = trimDockerRegistry(currentImage)
		if currentImage != containerImage {
			Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
			continue waitLoop
28
vendor/k8s.io/kubernetes/test/e2e/framework/resource_usage_gatherer.go
generated
vendored
@ -48,6 +48,8 @@ type SingleContainerSummary struct {
// we can't have int here, as JSON does not accept integer keys.
type ResourceUsageSummary map[string][]SingleContainerSummary

const NoCPUConstraint = math.MaxFloat64

func (s *ResourceUsageSummary) PrintHumanReadable() string {
	buf := &bytes.Buffer{}
	w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
@ -149,7 +151,7 @@ func (w *resourceGatherWorker) singleProbe() {
		}
		for k, v := range kubemarkData {
			data[k] = &ContainerResourceUsage{
				Name: v.Name,
				Name:                    v.Name,
				MemoryWorkingSetInBytes: v.MemoryWorkingSetInBytes,
				CPUUsageInCores:         v.CPUUsageInCores,
			}
@ -202,12 +204,20 @@ type ContainerResourceGatherer struct {

type ResourceGathererOptions struct {
	InKubemark                  bool
	MasterOnly                  bool
	Nodes                       NodesSet
	ResourceDataGatheringPeriod time.Duration
	ProbeDuration               time.Duration
	PrintVerboseLogs            bool
}

type NodesSet int

const (
	AllNodes          NodesSet = 0 // All containers on all nodes
	MasterNodes       NodesSet = 1 // All containers on Master nodes only
	MasterAndDNSNodes NodesSet = 2 // All containers on Master nodes and DNS containers on other nodes
)

func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
	g := ContainerResourceGatherer{
		client: c,
@ -237,13 +247,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
			return nil, err
		}
	}
	dnsNodes := make(map[string]bool)
	for _, pod := range pods.Items {
		if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
			continue
		}
		if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
			continue
		}
		for _, container := range pod.Status.InitContainerStatuses {
			g.containerIDs = append(g.containerIDs, container.Name)
		}
		for _, container := range pod.Status.ContainerStatuses {
			g.containerIDs = append(g.containerIDs, container.Name)
		}
		if options.Nodes == MasterAndDNSNodes {
			dnsNodes[pod.Spec.NodeName] = true
		}
	}
	nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
@ -252,7 +272,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
	}

	for _, node := range nodeList.Items {
		if !options.MasterOnly || system.IsMasterNode(node.Name) {
		if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
			g.workerWg.Add(1)
			g.workers = append(g.workers, resourceGatherWorker{
				c: c,
@ -266,7 +286,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
				probeDuration:    options.ProbeDuration,
				printVerboseLogs: options.PrintVerboseLogs,
			})
			if options.MasterOnly {
			if options.Nodes == MasterNodes {
				break
			}
		}
54
vendor/k8s.io/kubernetes/test/e2e/framework/service_util.go
generated
vendored
@ -40,8 +40,6 @@ import (
	"k8s.io/client-go/util/retry"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	azurecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

@ -254,7 +252,7 @@ func (j *ServiceTestJig) CreateOnlyLocalNodePortService(namespace, serviceName s
	svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeNodePort
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}}
		svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}}
	})

	if createPod {
@ -1374,23 +1372,7 @@ func VerifyServeHostnameServiceDown(c clientset.Interface, host string, serviceI
}

func CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		CleanupServiceGCEResources(c, loadBalancerName, region, zone)
	}

	// TODO: we need to add this function with other cloud providers, if there is a need.
}

func CleanupServiceGCEResources(c clientset.Interface, loadBalancerName, region, zone string) {
	if pollErr := wait.Poll(5*time.Second, LoadBalancerCleanupTimeout, func() (bool, error) {
		if err := CleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
			Logf("Still waiting for glbc to cleanup: %v", err)
			return false, nil
		}
		return true, nil
	}); pollErr != nil {
		Failf("Failed to cleanup service GCE resources.")
	}
	TestContext.CloudConfig.Provider.CleanupServiceResources(c, loadBalancerName, region, zone)
}

func DescribeSvc(ns string) {
@ -1414,7 +1396,7 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select
		headlessService.Spec.ExternalName = externalName
	} else {
		headlessService.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "http", Protocol: "TCP"},
			{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
		}
	}
	if isHeadless {
@ -1424,29 +1406,9 @@ func CreateServiceSpec(serviceName, externalName string, isHeadless bool, select
}

// EnableAndDisableInternalLB returns two functions for enabling and disabling the internal load balancer
// setting for the supported cloud providers: GCE/GKE and Azure
// setting for the supported cloud providers (currently GCE/GKE and Azure) and empty functions for others.
func EnableAndDisableInternalLB() (enable func(svc *v1.Service), disable func(svc *v1.Service)) {
	enable = func(svc *v1.Service) {}
	disable = func(svc *v1.Service) {}

	switch TestContext.Provider {
	case "gce", "gke":
		enable = func(svc *v1.Service) {
			svc.ObjectMeta.Annotations = map[string]string{gcecloud.ServiceAnnotationLoadBalancerType: string(gcecloud.LBTypeInternal)}
		}
		disable = func(svc *v1.Service) {
			delete(svc.ObjectMeta.Annotations, gcecloud.ServiceAnnotationLoadBalancerType)
		}
	case "azure":
		enable = func(svc *v1.Service) {
			svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "true"}
		}
		disable = func(svc *v1.Service) {
			svc.ObjectMeta.Annotations = map[string]string{azurecloud.ServiceAnnotationLoadBalancerInternal: "false"}
		}
	}

	return
	return TestContext.CloudConfig.Provider.EnableAndDisableInternalLB()
}

func GetServiceLoadBalancerCreationTimeout(cs clientset.Interface) time.Duration {
@ -1523,7 +1485,7 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target
	}
	if shouldHold {
		if !transitionState && !affinityHolds {
			return true, fmt.Errorf("Affintity should hold but didn't.")
			return true, fmt.Errorf("Affinity should hold but didn't.")
		}
		if trackerFulfilled && affinityHolds {
			return true, nil
@ -1540,9 +1502,9 @@ func CheckAffinity(jig *ServiceTestJig, execPod *v1.Pod, targetIp string, target
		checkAffinityFailed(tracker, fmt.Sprintf("Connection to %s timed out or not enough responses.", targetIpPort))
	}
	if shouldHold {
		checkAffinityFailed(tracker, "Affintity should hold but didn't.")
		checkAffinityFailed(tracker, "Affinity should hold but didn't.")
	} else {
		checkAffinityFailed(tracker, "Affintity shouldn't hold but did.")
		checkAffinityFailed(tracker, "Affinity shouldn't hold but did.")
	}
	return true
}
99
vendor/k8s.io/kubernetes/test/e2e/framework/size.go
generated
vendored
@ -18,14 +18,7 @@ package framework

import (
	"fmt"
	"os/exec"
	"regexp"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
)

const (
@ -38,88 +31,15 @@ func ResizeGroup(group string, size int32) error {
		CoreDump(TestContext.ReportDir)
		defer CoreDump(TestContext.ReportDir)
	}
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		zone, err := getGCEZoneForGroup(group)
		if err != nil {
			return err
		}
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
			group, fmt.Sprintf("--size=%v", size),
			"--project="+TestContext.CloudConfig.ProjectID, "--zone="+zone).CombinedOutput()
		if err != nil {
			return fmt.Errorf("Failed to resize node instance group %s: %s", group, output)
		}
		return nil
	} else if TestContext.Provider == "aws" {
		client := autoscaling.New(session.New())
		return awscloud.ResizeInstanceGroup(client, group, int(size))
	} else if TestContext.Provider == "kubemark" {
		return TestContext.CloudConfig.KubemarkController.SetNodeGroupSize(group, int(size))
	} else {
		return fmt.Errorf("Provider does not support InstanceGroups")
	}
	return TestContext.CloudConfig.Provider.ResizeGroup(group, size)
}

func GetGroupNodes(group string) ([]string, error) {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		zone, err := getGCEZoneForGroup(group)
		if err != nil {
			return nil, err
		}
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
			"list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
			"--zone="+zone).CombinedOutput()
		if err != nil {
			return nil, fmt.Errorf("Failed to get nodes in instance group %s: %s", group, output)
		}
		re := regexp.MustCompile(".*RUNNING")
		lines := re.FindAllString(string(output), -1)
		for i, line := range lines {
			lines[i] = line[:strings.Index(line, " ")]
		}
		return lines, nil
	} else if TestContext.Provider == "kubemark" {
		return TestContext.CloudConfig.KubemarkController.GetNodeNamesForNodeGroup(group)
	} else {
		return nil, fmt.Errorf("provider does not support InstanceGroups")
	}
	return TestContext.CloudConfig.Provider.GetGroupNodes(group)
}

func GroupSize(group string) (int, error) {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		zone, err := getGCEZoneForGroup(group)
		if err != nil {
			return -1, err
		}
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
			"list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
			"--zone="+zone).CombinedOutput()
		if err != nil {
			return -1, fmt.Errorf("Failed to get group size for group %s: %s", group, output)
		}
		re := regexp.MustCompile("RUNNING")
		return len(re.FindAllString(string(output), -1)), nil
	} else if TestContext.Provider == "aws" {
		client := autoscaling.New(session.New())
		instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
		if err != nil {
			return -1, fmt.Errorf("error describing instance group: %v", err)
		}
		if instanceGroup == nil {
			return -1, fmt.Errorf("instance group not found: %s", group)
		}
		return instanceGroup.CurrentSize()
	} else if TestContext.Provider == "kubemark" {
		return TestContext.CloudConfig.KubemarkController.GetNodeGroupSize(group)
	} else {
		return -1, fmt.Errorf("provider does not support InstanceGroups")
	}
	return TestContext.CloudConfig.Provider.GroupSize(group)
}

func WaitForGroupSize(group string, size int32) error {
@ -139,16 +59,3 @@ func WaitForGroupSize(group string, size int32) error {
	}
	return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
}

func getGCEZoneForGroup(group string) (string, error) {
	zone := TestContext.CloudConfig.Zone
	if TestContext.CloudConfig.MultiZone {
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "list",
|
||||
"--project="+TestContext.CloudConfig.ProjectID, "--format=value(zone)", "--filter=name="+group).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to get zone for node group %s: %s", group, output)
|
||||
}
|
||||
zone = strings.TrimSpace(string(output))
|
||||
}
|
||||
return zone, nil
|
||||
}
|
||||
|
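The per-provider branches above collapse into calls on TestContext.CloudConfig.Provider. Inferred from these call sites (the authoritative definition lives in the new provider.go, not shown here), the subset of ProviderInterface exercised by size.go would look roughly like:

    // Sketch only; method set inferred from the ResizeGroup, GetGroupNodes
    // and GroupSize delegations above.
    type ProviderInterface interface {
        ResizeGroup(group string, size int32) error   // scale the node group
        GetGroupNodes(group string) ([]string, error) // list node names in the group
        GroupSize(group string) (int, error)          // current group size
    }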
8
vendor/k8s.io/kubernetes/test/e2e/framework/statefulset_utils.go
generated
vendored
@ -64,7 +64,7 @@ func CreateStatefulSetService(name string, labels map[string]string) *v1.Service
		},
	}
	headlessService.Spec.Ports = []v1.ServicePort{
		{Port: 80, Name: "http", Protocol: "TCP"},
		{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
	}
	headlessService.Spec.ClusterIP = "None"
	return headlessService
@ -183,7 +183,7 @@ func (s *StatefulSetTester) DeleteStatefulPodAtIndex(index int, ss *apps.Statefu
// VerifyStatefulPodFunc is a func that examines a StatefulSetPod.
type VerifyStatefulPodFunc func(*v1.Pod)

// VerifyPodAtIndex applies a visitor patter to the Pod at index in ss. verify is is applied to the Pod to "visit" it.
// VerifyPodAtIndex applies a visitor patter to the Pod at index in ss. verify is applied to the Pod to "visit" it.
func (s *StatefulSetTester) VerifyPodAtIndex(index int, ss *apps.StatefulSet, verify VerifyStatefulPodFunc) {
	name := getStatefulSetPodNameAtIndex(index, ss)
	pod, err := s.c.CoreV1().Pods(ss.Namespace).Get(name, metav1.GetOptions{})
@ -599,7 +599,7 @@ func (s *StatefulSetTester) ResumeNextPod(ss *apps.StatefulSet) {
		if resumedPod != "" {
			Failf("Found multiple paused stateful pods: %v and %v", pod.Name, resumedPod)
		}
		_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "touch /data/statefulset-continue; sync", StatefulSetPoll, StatefulPodTimeout)
		_, err := RunHostCmdWithRetries(pod.Namespace, pod.Name, "dd if=/dev/zero of=/data/statefulset-continue bs=1 count=1 conv=fsync", StatefulSetPoll, StatefulPodTimeout)
		ExpectNoError(err)
		Logf("Resumed pod %v", pod.Name)
		resumedPod = pod.Name
@ -810,7 +810,7 @@ func NewStatefulSet(name, ns, governingSvcName string, replicas int32, statefulP
					Containers: []v1.Container{
						{
							Name:         "nginx",
							Image:        imageutils.GetE2EImage(imageutils.NginxSlim),
							Image:        imageutils.GetE2EImage(imageutils.Nginx),
							VolumeMounts: mounts,
						},
					},
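A hypothetical snippet tying the two helpers above together; the NewStatefulSet parameters cut off in the hunk header are assumed to be the stateful-pod/pod volume-mount slices and the label map:

    labels := map[string]string{"app": "nginx"}
    // Headless governing Service with the TCP port fixed above.
    svc := CreateStatefulSetService("web", labels)
    // Three-replica StatefulSet using the Nginx e2e image, no extra mounts.
    ss := NewStatefulSet("web", ns, svc.Name, 3, nil, nil, labels)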
150
vendor/k8s.io/kubernetes/test/e2e/framework/test_context.go
generated
vendored
@ -23,28 +23,53 @@ import (
	"os"
	"time"

	"github.com/golang/glog"
	"github.com/onsi/ginkgo/config"
	"github.com/spf13/viper"
	"github.com/pkg/errors"
	utilflag "k8s.io/apiserver/pkg/util/flag"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
	"k8s.io/kubernetes/pkg/kubemark"
	"k8s.io/klog"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
)

const defaultHost = "http://127.0.0.1:8080"

// TestContextType contains test settings and global state. Due to
// historic reasons, it is a mixture of items managed by the test
// framework itself, cloud providers and individual tests.
// The goal is to move anything not required by the framework
// into the code which uses the settings.
//
// The recommendation for those settings is:
// - They are stored in their own context structure or local
//   variables.
// - The standard `flag` package is used to register them.
//   The flag name should follow the pattern <part1>.<part2>....<partn>
//   where the prefix is unlikely to conflict with other tests or
//   standard packages and each part is in lower camel case. For
//   example, test/e2e/storage/csi/context.go could define
//   storage.csi.numIterations.
// - framework/config can be used to simplify the registration of
//   multiple options with a single function call:
//   var storageCSI struct {
//       NumIterations int `default:"1" usage:"number of iterations"`
//   }
//   _ = config.AddOptions(&storageCSI, "storage.csi")
// - The direct use of Viper in tests is possible, but discouraged because
//   it only works in test suites which use Viper (which is not
//   required) and the supported options cannot be
//   discovered by a test suite user.
//
// Test suite authors can use framework/viper to make all command line
// parameters also configurable via a configuration file.
type TestContextType struct {
	KubeConfig string
	KubemarkExternalKubeConfig string
	KubeContext string
	KubeAPIContentType string
	KubeVolumeDir string
	CertDir string
	Host string
	KubeConfig string
	KubeContext string
	KubeAPIContentType string
	KubeVolumeDir string
	CertDir string
	Host string
	// TODO: Deprecating this over time... instead just use gobindata_util.go, see #23987.
	RepoRoot string
	DockershimCheckpointDir string
@ -64,11 +89,9 @@ type TestContextType struct {
	MinStartupPods int
	// Timeout for waiting for system pods to be running
	SystemPodsStartupTimeout time.Duration
	UpgradeTarget string
	EtcdUpgradeStorage string
	EtcdUpgradeVersion string
	IngressUpgradeImage string
	UpgradeImage string
	GCEUpgradeScript string
	ContainerRuntime string
	ContainerRuntimeEndpoint string
@ -99,6 +122,8 @@ type TestContextType struct {
	OutputPrintType string
	// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
	NodeSchedulableTimeout time.Duration
	// SystemDaemonsetStartupTimeout is the timeout for waiting for all system daemonsets to be ready.
	SystemDaemonsetStartupTimeout time.Duration
	// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
	// It accepts namespace base name, which will be prepended with e2e prefix, kube client
	// and labels to be applied to a namespace.
@ -113,8 +138,6 @@ type TestContextType struct {
	FeatureGates map[string]bool
	// Node e2e specific test context
	NodeTestContextType
	// Storage e2e specific test context
	StorageTestContextType
	// Monitoring solution that is used in current cluster.
	ClusterMonitoringMode string
	// Separate Prometheus monitoring deployed in cluster
@ -123,23 +146,8 @@ type TestContextType struct {
	// Indicates what path the kubernetes-anywhere is installed on
	KubernetesAnywherePath string

	// Viper-only parameters. These will in time replace all flags.

	// Example: Create a file 'e2e.json' with the following:
	// "Cadvisor":{
	//     "MaxRetries":"6"
	// }

	Viper string
	Cadvisor struct {
		MaxRetries int
		SleepDurationMS int
	}

	LoggingSoak struct {
		Scale int
		MilliSecondsBetweenWaves int
	}
	// The DNS Domain of the cluster.
	ClusterDNSDomain string
}

// NodeTestContextType is part of TestContextType, it is shared by all node e2e tests.
@ -162,14 +170,6 @@ type NodeTestContextType struct {
	SystemSpecName string
}

// StorageTestContextType contains the shared settings for storage e2e tests.
type StorageTestContextType struct {
	// CSIImageVersion overrides the builtin stable version numbers if set.
	CSIImageVersion string
	// CSIImageRegistry defines the image registry hosting the CSI container images.
	CSIImageRegistry string
}

type CloudConfig struct {
	ApiEndpoint string
	ProjectID string
@ -188,8 +188,7 @@ type CloudConfig struct {
	NodeTag string
	MasterTag string

	Provider cloudprovider.Interface
	KubemarkController *kubemark.KubemarkController
	Provider ProviderInterface
}

var TestContext TestContextType
@ -223,7 +222,6 @@ func RegisterCommonFlags() {
	flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
	flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
	flag.Var(utilflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
	flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
	flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
	flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
	flag.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
@ -231,14 +229,13 @@ func RegisterCommonFlags() {
	flag.StringVar(&TestContext.SystemdServices, "systemd-services", "docker", "The comma separated list of systemd services the framework will dump logs for.")
	flag.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
	flag.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
	flag.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.")
	flag.StringVar(&TestContext.KubernetesAnywherePath, "kubernetes-anywhere-path", "/workspace/k8s.io/kubernetes-anywhere", "Which directory kubernetes-anywhere is installed to.")
}

// Register flags specific to the cluster e2e test suite.
func RegisterClusterFlags() {
	flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
	flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
	flag.StringVar(&TestContext.KubemarkExternalKubeConfig, fmt.Sprintf("%s-%s", "kubemark-external", clientcmd.RecommendedConfigPathFlag), "", "Path to kubeconfig containing embedded authinfo for external cluster.")
	flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
	flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")

@ -250,15 +247,16 @@ func RegisterClusterFlags() {
	flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
	flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
	flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
	flag.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, trusty, or coreos).")
	flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).")
	flag.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, ubuntu, gci, coreos, or custom).")
	flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, ubuntu, gci, coreos, or custom).")
	flag.StringVar(&TestContext.ClusterMonitoringMode, "cluster-monitoring-mode", "standalone", "The monitoring solution that is used in the cluster.")
	flag.BoolVar(&TestContext.EnablePrometheusMonitoring, "prometheus-monitoring", false, "Separate Prometheus monitoring deployed in cluster.")
	flag.StringVar(&TestContext.ClusterDNSDomain, "dns-domain", "cluster.local", "The DNS Domain of the cluster.")

	// TODO: Flags per provider? Rename gce-project/gce-zone?
	cloudConfig := &TestContext.CloudConfig
	flag.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
	flag.StringVar(&cloudConfig.ApiEndpoint, "gce-api-endpoint", "", "The GCE ApiEndpoint being used, if applicable")
	flag.StringVar(&cloudConfig.ApiEndpoint, "gce-api-endpoint", "", "The GCE APIEndpoint being used, if applicable")
	flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
	flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
	flag.StringVar(&cloudConfig.Region, "gce-region", "", "GCE region being used, if applicable")
@ -277,10 +275,9 @@ func RegisterClusterFlags() {
	flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
	flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
	flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 30*time.Minute, "Timeout for waiting for all nodes to be schedulable.")
	flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
	flag.DurationVar(&TestContext.SystemDaemonsetStartupTimeout, "system-daemonsets-startup-timeout", 5*time.Minute, "Timeout for waiting for all system daemonsets to be ready.")
	flag.StringVar(&TestContext.EtcdUpgradeStorage, "etcd-upgrade-storage", "", "The storage version to upgrade to (either 'etcdv2' or 'etcdv3') if doing an etcd upgrade test.")
	flag.StringVar(&TestContext.EtcdUpgradeVersion, "etcd-upgrade-version", "", "The etcd binary version to upgrade to (e.g., '3.0.14', '2.3.7') if doing an etcd upgrade test.")
	flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
	flag.StringVar(&TestContext.IngressUpgradeImage, "ingress-upgrade-image", "", "Image to upgrade to if doing an upgrade test for ingress.")
	flag.StringVar(&TestContext.GCEUpgradeScript, "gce-upgrade-script", "", "Script to use to upgrade a GCE cluster.")
	flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
@ -303,32 +300,11 @@ func RegisterNodeFlags() {
	flag.StringVar(&TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.")
}

func RegisterStorageFlags() {
	flag.StringVar(&TestContext.CSIImageVersion, "csiImageVersion", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
	flag.StringVar(&TestContext.CSIImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
}

// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
func ViperizeFlags() {

	// Part 1: Set regular flags.
	// TODO: Future, lets eliminate e2e 'flag' deps entirely in favor of viper only,
	// since go test 'flag's are sort of incompatible w/ flag, glog, etc.
// HandleFlags sets up all flags and parses the command line.
func HandleFlags() {
	RegisterCommonFlags()
	RegisterClusterFlags()
	RegisterStorageFlags()
	flag.Parse()

	// Part 2: Set Viper provided flags.
	// This must be done after common flags are registered, since Viper is a flag option.
	viper.SetConfigName(TestContext.Viper)
	viper.AddConfigPath(".")
	viper.ReadInConfig()

	// TODO Consider whether or not we want to use overwriteFlagsWithViperConfig().
	viper.Unmarshal(&TestContext)

	AfterReadingAllFlags(&TestContext)
}

func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
@ -339,7 +315,7 @@ func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
	config := clientcmdapi.NewConfig()

	credentials := clientcmdapi.NewAuthInfo()
	credentials.Token = clientCfg.BearerToken
	credentials.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
	credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
	if len(credentials.ClientCertificate) == 0 {
		credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
@ -379,11 +355,11 @@ func AfterReadingAllFlags(t *TestContextType) {
			kubeConfig := createKubeConfig(clusterConfig)
			clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
			t.KubeConfig = tempFile.Name()
			glog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
			klog.Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())
		}
	}
	if len(t.KubeConfig) == 0 {
		glog.Warningf("Unable to find in-cluster config, using default host : %s", defaultHost)
		klog.Warningf("Unable to find in-cluster config, using default host : %s", defaultHost)
		t.Host = defaultHost
	}
}
@ -391,4 +367,24 @@ func AfterReadingAllFlags(t *TestContextType) {
	if t.AllowedNotReadyNodes == 0 {
		t.AllowedNotReadyNodes = t.CloudConfig.NumNodes / 100
	}

	// Make sure that all test runs have a valid TestContext.CloudConfig.Provider.
	var err error
	TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
	if err == nil {
		return
	}
	if !os.IsNotExist(errors.Cause(err)) {
		Failf("Failed to setup provider config: %v", err)
	}
	// We allow unknown provider parameters for historic reasons. At least log a
	// warning to catch typos.
	// TODO (https://github.com/kubernetes/kubernetes/issues/70200):
	// - remove the fallback for unknown providers
	// - proper error message instead of Failf (which panics)
	klog.Warningf("Unknown provider %q, proceeding as for --provider=skeleton.", TestContext.Provider)
	TestContext.CloudConfig.Provider, err = SetupProviderConfig("skeleton")
	if err != nil {
		Failf("Failed to setup fallback skeleton provider config: %v", err)
	}
}
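The TestContextType comment above recommends registering per-test settings with the standard flag package under a dotted prefix. A self-contained sketch of that pattern (package and option names are illustrative only, taken from the comment's own storage.csi example):

    package csi // e.g. test/e2e/storage/csi/context.go

    import "flag"

    // Local option struct instead of another TestContextType field.
    var storageCSI struct {
        NumIterations int
    }

    func init() {
        // The "storage.csi" prefix is unlikely to clash with other suites
        // or standard packages.
        flag.IntVar(&storageCSI.NumIterations, "storage.csi.numIterations",
            1, "number of iterations")
    }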
22
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/BUILD
generated
vendored
Normal file
@ -0,0 +1,22 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["testfiles.go"],
    importpath = "k8s.io/kubernetes/test/e2e/framework/testfiles",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
189
vendor/k8s.io/kubernetes/test/e2e/framework/testfiles/testfiles.go
generated
vendored
Normal file
@ -0,0 +1,189 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package testfiles provides a wrapper around various optional ways
// of retrieving additional files needed during a test run:
// - builtin bindata
// - filesystem access
//
// Because it is a self-contained package, it can be used by
// test/e2e/framework and test/e2e/manifest without creating
// a circular dependency.
package testfiles

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
)

var filesources []FileSource

// AddFileSource registers another provider for files that may be
// needed at runtime. Should be called during initialization of a test
// binary.
func AddFileSource(filesource FileSource) {
	filesources = append(filesources, filesource)
}

// FileSource implements one way of retrieving test file content. For
// example, one file source could read from the original source code
// file tree, another from bindata compiled into a test executable.
type FileSource interface {
	// ReadTestFile retrieves the content of a file that gets maintained
	// alongside a test's source code. Files are identified by the
	// relative path inside the repository containing the tests, for
	// example "cluster/gce/upgrade.sh" inside kubernetes/kubernetes.
	//
	// When the file is not found, a nil slice is returned. An error is
	// returned for all fatal errors.
	ReadTestFile(filePath string) ([]byte, error)

	// DescribeFiles returns a multi-line description of which
	// files are available via this source. It is meant to be
	// used as part of the error message when a file cannot be
	// found.
	DescribeFiles() string
}

// Fail is an error handler function with the same prototype and
// semantic as ginkgo.Fail. Typically ginkgo.Fail is what callers
// of ReadOrDie and Exists will pass. This way this package
// avoids depending on Ginkgo.
type Fail func(failure string, callerSkip ...int)

// ReadOrDie tries to retrieve the desired file content from
// one of the registered file sources. In contrast to FileSource, it
// will either return a valid slice or abort the test by calling the fatal function,
// i.e. the caller doesn't have to implement error checking.
func ReadOrDie(filePath string, fail Fail) []byte {
	data, err := Read(filePath)
	if err != nil {
		fail(err.Error(), 1)
	}
	return data
}

// Read tries to retrieve the desired file content from
// one of the registered file sources.
func Read(filePath string) ([]byte, error) {
	if len(filesources) == 0 {
		return nil, fmt.Errorf("no file sources registered (yet?), cannot retrieve test file %s", filePath)
	}
	for _, filesource := range filesources {
		data, err := filesource.ReadTestFile(filePath)
		if err != nil {
			return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
		}
		if data != nil {
			return data, nil
		}
	}
	// Here we try to generate an error that points test authors
	// or users in the right direction for resolving the problem.
	error := fmt.Sprintf("Test file %q was not found.\n", filePath)
	for _, filesource := range filesources {
		error += filesource.DescribeFiles()
		error += "\n"
	}
	return nil, errors.New(error)
}

// Exists checks whether a file could be read. Unexpected errors
// are handled by calling the fail function, which then should
// abort the current test.
func Exists(filePath string, fail Fail) bool {
	for _, filesource := range filesources {
		data, err := filesource.ReadTestFile(filePath)
		if err != nil {
			fail(fmt.Sprintf("fatal error looking for test file %s: %s", filePath, err), 1)
		}
		if data != nil {
			return true
		}
	}
	return false
}

// RootFileSource looks for files relative to a root directory.
type RootFileSource struct {
	Root string
}

// ReadTestFile looks for the file relative to the configured
// root directory.
func (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {
	fullPath := filepath.Join(r.Root, filePath)
	data, err := ioutil.ReadFile(fullPath)
	if os.IsNotExist(err) {
		// Not an error (yet), some other provider may have the file.
		return nil, nil
	}
	return data, err
}

// DescribeFiles explains that it looks for files inside a certain
// root directory.
func (r RootFileSource) DescribeFiles() string {
	description := fmt.Sprintf("Test files are expected in %q", r.Root)
	if !path.IsAbs(r.Root) {
		// The default in test_context.go is the relative path
		// ../../, which doesn't really help locating the
		// actual location. Therefore we add also the absolute
		// path if necessary.
		abs, err := filepath.Abs(r.Root)
		if err == nil {
			description += fmt.Sprintf(" = %q", abs)
		}
	}
	description += "."
	return description
}

// BindataFileSource handles files stored in a package generated with bindata.
type BindataFileSource struct {
	Asset      func(string) ([]byte, error)
	AssetNames func() []string
}

// ReadTestFile looks for an asset with the given path.
func (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {
	fileBytes, err := b.Asset(filePath)
	if err != nil {
		// It would be nice to have a better way to detect
		// "not found" errors :-/
		if strings.HasSuffix(err.Error(), "not found") {
			return nil, nil
		}
	}
	return fileBytes, nil
}

// DescribeFiles explains about gobindata and then lists all available files.
func (b BindataFileSource) DescribeFiles() string {
	var lines []string
	lines = append(lines, "The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.")
	assets := b.AssetNames()
	sort.Strings(assets)
	lines = append(lines, assets...)
	description := strings.Join(lines, "\n   ")
	return description
}
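A runnable sketch of how a consumer wires the package above together; the root directory and file path are placeholders, and real test binaries pass ginkgo.Fail instead of the handler below:

    package main

    import (
        "fmt"
        "os"

        "k8s.io/kubernetes/test/e2e/framework/testfiles"
    )

    func main() {
        // Register a filesystem-backed source (tests do this during init).
        testfiles.AddFileSource(testfiles.RootFileSource{Root: "../../"})
        fail := func(failure string, callerSkip ...int) {
            fmt.Fprintln(os.Stderr, failure)
            os.Exit(1)
        }
        // Aborts via fail() when no registered source can provide the file.
        data := testfiles.ReadOrDie("cluster/gce/upgrade.sh", fail)
        fmt.Printf("read %d bytes\n", len(data))
    }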
429
vendor/k8s.io/kubernetes/test/e2e/framework/util.go
generated
vendored
@ -41,10 +41,9 @@ import (
	"text/tabwriter"
	"time"

	"github.com/golang/glog"
	"golang.org/x/crypto/ssh"
	"golang.org/x/net/websocket"
	"google.golang.org/api/googleapi"
	"k8s.io/klog"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
@ -63,18 +62,19 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/util/wait"
	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/apimachinery/pkg/watch"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	scaleclient "k8s.io/client-go/scale"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	appsinternal "k8s.io/kubernetes/pkg/apis/apps"
@ -83,10 +83,9 @@ import (
	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
	"k8s.io/kubernetes/pkg/controller"
	nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
	"k8s.io/kubernetes/pkg/controller/service"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/kubelet/util/format"
@ -96,7 +95,6 @@ import (
	sshutil "k8s.io/kubernetes/pkg/ssh"
	"k8s.io/kubernetes/pkg/util/system"
	taintutils "k8s.io/kubernetes/pkg/util/taints"
	utilversion "k8s.io/kubernetes/pkg/util/version"
	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
@ -113,11 +111,14 @@ const (
	// Same as `PodStartTimeout` to wait for the pod to be started, but shorter.
	// Use it case by case when we are sure pod start will not be delayed
	// minutes by slow docker pulls or something else.
	PodStartShortTimeout = 1 * time.Minute
	PodStartShortTimeout = 2 * time.Minute

	// How long to wait for a pod to be deleted
	PodDeleteTimeout = 5 * time.Minute

	// PodEventTimeout is how much we wait for a pod event to occur.
	PodEventTimeout = 2 * time.Minute

	// If there are any orphaned namespaces to clean up, this test is running
	// on a long lived cluster. A long wait here is preferable to spurious test
	// failures caused by leaked resources from a previous test run.
@ -200,19 +201,10 @@ const (

	// ssh port
	sshPort = "22"

	// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
	// static pods to pull the list of seeded images. If they don't pull
	// images within this time we simply log their output and carry on
	// with the tests.
	ImagePrePullingTimeout = 5 * time.Minute
)

var (
	BusyBoxImage = "busybox"
	// Label allocated to the image puller static pod that runs on each node
	// before e2es.
	ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)

	// For parsing Kubectl version for version-skewed testing.
	gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
@ -250,11 +242,6 @@ func GetServerArchitecture(c clientset.Interface) string {
	return arch
}

// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
	return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
}

func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
	return request.Resource("services").SubResource("proxy"), nil
}
@ -272,7 +259,7 @@ type ContainerFailures struct {
func GetMasterHost() string {
	masterUrl, err := url.Parse(TestContext.Host)
	ExpectNoError(err)
	return masterUrl.Host
	return masterUrl.Hostname()
}

func nowStamp() string {
@ -363,7 +350,7 @@ func SkipIfMultizone(c clientset.Interface) {
		Skipf("Error listing cluster zones")
	}
	if zones.Len() > 1 {
		Skipf("Requires more than one zone")
		Skipf("Requires at most one zone")
	}
}

@ -407,6 +394,12 @@ func SkipUnlessSecretExistsAfterWait(c clientset.Interface, name, namespace stri
	Logf("Secret %v in namespace %v found after duration %v", name, namespace, time.Since(start))
}

func SkipUnlessTaintBasedEvictionsEnabled() {
	if !utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions) {
		Skipf("Only supported when %v feature is enabled", features.TaintBasedEvictions)
	}
}

func SkipIfContainerRuntimeIs(runtimes ...string) {
	for _, runtime := range runtimes {
		if runtime == TestContext.ContainerRuntime {
@ -638,7 +631,7 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
	ignoreSelector := labels.SelectorFromSet(ignoreLabels)
	ignoreSelector := labels.SelectorFromSet(map[string]string{})
	start := time.Now()
	Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
		timeout, minPods, ns)
@ -738,6 +731,40 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
	return nil
}

// WaitForDaemonSets for all daemonsets in the given namespace to be ready
// (defined as all but 'allowedNotReadyNodes' pods associated with that
// daemonset are ready).
func WaitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes int32, timeout time.Duration) error {
	start := time.Now()
	Logf("Waiting up to %v for all daemonsets in namespace '%s' to start",
		timeout, ns)

	return wait.PollImmediate(Poll, timeout, func() (bool, error) {
		dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
		if err != nil {
			Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		var notReadyDaemonSets []string
		for _, ds := range dsList.Items {
			Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ds.Status.NumberReady, ds.Status.DesiredNumberScheduled, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
			if ds.Status.DesiredNumberScheduled-ds.Status.NumberReady > allowedNotReadyNodes {
				notReadyDaemonSets = append(notReadyDaemonSets, ds.ObjectMeta.Name)
			}
		}

		if len(notReadyDaemonSets) > 0 {
			Logf("there are not ready daemonsets: %v", notReadyDaemonSets)
			return false, nil
		}

		return true, nil
	})
}

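A minimal call of the new helper above (namespace and timeout are hypothetical):

    // Require every daemonset pod in kube-system to be ready within 5 minutes,
    // tolerating zero not-ready nodes.
    if err := WaitForDaemonSets(c, "kube-system", 0, 5*time.Minute); err != nil {
        Failf("daemonsets not ready: %v", err)
    }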
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
	for _, container := range pod.Spec.Containers {
		if strings.Contains(container.Name, containerNameSubstr) {
@ -862,7 +889,9 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
	if err != nil {
		return err
	}
	_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err = watchtools.UntilWithoutRetry(ctx, w, conditions.ServiceAccountHasSecrets)
	return err
}

@ -987,22 +1016,40 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll,

// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
	Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
	return WaitForPersistentVolumeClaimsPhase(phase, c, ns, []string{pvcName}, Poll, timeout, true)
}

// WaitForPersistentVolumeClaimsPhase waits for any (if matchAny is true) or all (if matchAny is false) PersistentVolumeClaims
// to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcNames []string, Poll, timeout time.Duration, matchAny bool) error {
	if len(pvcNames) == 0 {
		return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0.")
	}
	Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
		pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
		if err != nil {
			Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
			continue
		} else {
			if pvc.Status.Phase == phase {
				Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
				return nil
		phaseFoundInAllClaims := true
		for _, pvcName := range pvcNames {
			pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
			if err != nil {
				Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
				continue
			} else {
				Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
				if pvc.Status.Phase == phase {
					Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
					if matchAny {
						return nil
					}
				} else {
					Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
					phaseFoundInAllClaims = false
				}
			}
		}
		if phaseFoundInAllClaims {
			return nil
		}
	}
	return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
	return fmt.Errorf("PersistentVolumeClaims %v not all in phase %s within %v", pvcNames, phase, timeout)
}

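The matchAny flag selects between the two waiting modes; hypothetical calls for both:

    // Wait until *any* of the claims is Bound (matchAny = true).
    err := WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, c, ns,
        []string{"pvc-a", "pvc-b"}, Poll, 5*time.Minute, true)
    // Wait until *all* of the claims are Bound (matchAny = false).
    err = WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, c, ns,
        []string{"pvc-a", "pvc-b"}, Poll, 5*time.Minute, false)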
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
@ -1437,6 +1484,27 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
	}
}

// WaitTimeoutForPodEvent waits for an event to occur for a pod
func WaitTimeoutForPodEvent(c clientset.Interface, podName, namespace, eventSelector, msg string, timeout time.Duration) error {
	return wait.PollImmediate(Poll, timeout, eventOccurred(c, podName, namespace, eventSelector, msg))
}

func eventOccurred(c clientset.Interface, podName, namespace, eventSelector, msg string) wait.ConditionFunc {
	options := metav1.ListOptions{FieldSelector: eventSelector}
	return func() (bool, error) {
		events, err := c.CoreV1().Events(namespace).List(options)
		if err != nil {
			return false, fmt.Errorf("got error while getting pod events: %s", err)
		}
		for _, event := range events.Items {
			if strings.Contains(event.Message, msg) {
				return true, nil
			}
		}
		return false, nil
	}
}

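eventSelector is a field selector over the pod's events; a hypothetical call waiting for a scheduling-failure message, built with k8s.io/apimachinery/pkg/fields:

    selector := fields.Set{
        "involvedObject.kind":      "Pod",
        "involvedObject.name":      podName,
        "involvedObject.namespace": ns,
        "reason":                   "FailedScheduling",
    }.AsSelector().String()
    err := WaitTimeoutForPodEvent(c, podName, ns, selector,
        "Insufficient cpu", PodEventTimeout)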
// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
|
||||
// Returns an error if timeout occurs first.
|
||||
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
|
||||
@ -1578,7 +1646,9 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
|
||||
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.UntilWithoutRetry(ctx, w, func(event watch.Event) (bool, error) {
|
||||
switch event.Type {
|
||||
case watch.Deleted:
|
||||
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
|
||||
@ -1745,7 +1815,7 @@ func WaitForEndpoint(c clientset.Interface, ns, name string) error {
|
||||
}
|
||||
|
||||
// Context for checking pods responses by issuing GETs to them (via the API
|
||||
// proxy) and verifying that they answer with there own pod name.
|
||||
// proxy) and verifying that they answer with their own pod name.
|
||||
type podProxyResponseChecker struct {
|
||||
c clientset.Interface
|
||||
ns string
|
||||
@ -2621,6 +2691,8 @@ func GetReadyNodesIncludingTaintedOrDie(c clientset.Interface) (nodes *v1.NodeLi
|
||||
return nodes
|
||||
}
|
||||
|
||||
// WaitForAllNodesSchedulable waits up to timeout for all
|
||||
// (but TestContext.AllowedNotReadyNodes) to become scheduable.
|
||||
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
|
||||
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
|
||||
|
||||
@ -2643,7 +2715,13 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
|
||||
}
|
||||
for i := range nodes.Items {
|
||||
node := &nodes.Items[i]
|
||||
if !isNodeSchedulable(node) {
|
||||
if _, hasMasterRoleLabel := node.ObjectMeta.Labels[service.LabelNodeRoleMaster]; hasMasterRoleLabel {
|
||||
// Kops clusters have masters with spec.unscheduable = false and
|
||||
// node-role.kubernetes.io/master NoSchedule taint.
|
||||
// Don't wait for them.
|
||||
continue
|
||||
}
|
||||
if !isNodeSchedulable(node) || !isNodeUntainted(node) {
|
||||
notSchedulable = append(notSchedulable, node)
|
||||
}
|
||||
}
|
||||
@ -2659,10 +2737,11 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
|
||||
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
|
||||
Logf("Unschedulable nodes:")
|
||||
for i := range notSchedulable {
|
||||
Logf("-> %s Ready=%t Network=%t",
|
||||
Logf("-> %s Ready=%t Network=%t Taints=%v",
|
||||
notSchedulable[i].Name,
|
||||
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
|
||||
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
|
||||
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false),
|
||||
notSchedulable[i].Spec.Taints)
|
||||
}
|
||||
Logf("================================")
|
||||
}
|
||||
@ -3102,7 +3181,10 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
|
||||
terminatePodTime := time.Since(startTime) - deleteTime
|
||||
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
|
||||
|
||||
err = waitForPodsGone(ps, interval, 10*time.Minute)
|
||||
// In gce, at any point, small percentage of nodes can disappear for
|
||||
// ~10 minutes due to hostError. 20 minutes should be long enough to
|
||||
// restart VM in that case and delete the pod.
|
||||
err = waitForPodsGone(ps, interval, 20*time.Minute)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
|
||||
}
|
||||
@ -3114,25 +3196,48 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
|
||||
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
|
||||
// when the pod is inactvie.
|
||||
func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
|
||||
return wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
var activePods []*v1.Pod
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
pods := ps.List()
|
||||
activePods = nil
|
||||
for _, pod := range pods {
|
||||
if controller.IsPodActive(pod) {
|
||||
return false, nil
|
||||
activePods = append(activePods, pod)
|
||||
}
|
||||
}
|
||||
|
||||
if len(activePods) != 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
for _, pod := range activePods {
|
||||
Logf("ERROR: Pod %q running on %q is still active", pod.Name, pod.Spec.NodeName)
|
||||
}
|
||||
return fmt.Errorf("there are %d active pods. E.g. %q on node %q", len(activePods), activePods[0].Name, activePods[0].Spec.NodeName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// waitForPodsGone waits until there are no pods left in the PodStore.
|
||||
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
|
||||
return wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
if pods := ps.List(); len(pods) == 0 {
|
||||
var pods []*v1.Pod
|
||||
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
|
||||
if pods = ps.List(); len(pods) == 0 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
|
||||
if err == wait.ErrWaitTimeout {
|
||||
for _, pod := range pods {
|
||||
Logf("ERROR: Pod %q still exists. Node: %q", pod.Name, pod.Spec.NodeName)
|
||||
}
|
||||
return fmt.Errorf("there are %d pods left. E.g. %q on node %q", len(pods), pods[0].Name, pods[0].Spec.NodeName)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
|
||||
@ -3334,8 +3439,8 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult,
|
||||
LogSSHResult(result)
|
||||
|
||||
if result.Code != 0 || err != nil {
|
||||
return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
|
||||
cmd, err, result.Code)
|
||||
return nil, fmt.Errorf("failed running %q: %v (exit code %d, stderr %v)",
|
||||
cmd, err, result.Code, result.Stderr)
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
@ -3842,7 +3947,7 @@ func ParseKVLines(output, key string) string {
|
||||
func RestartKubeProxy(host string) error {
|
||||
// TODO: Make it work for all providers.
|
||||
if !ProviderIs("gce", "gke", "aws") {
|
||||
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
|
||||
return fmt.Errorf("unsupported provider for RestartKubeProxy: %s", TestContext.Provider)
|
||||
}
|
||||
// kubelet will restart the kube-proxy since it's running in a static pod
|
||||
Logf("Killing kube-proxy on node %v", host)
|
||||
@ -3879,7 +3984,7 @@ func RestartKubelet(host string) error {
|
||||
// TODO: Make it work for all providers and distros.
|
||||
supportedProviders := []string{"gce", "aws", "vsphere"}
|
||||
if !ProviderIs(supportedProviders...) {
|
||||
return fmt.Errorf("unsupported provider: %s, supported providers are: %v", TestContext.Provider, supportedProviders)
|
||||
return fmt.Errorf("unsupported provider for RestartKubelet: %s, supported providers are: %v", TestContext.Provider, supportedProviders)
|
||||
}
|
||||
if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") {
|
||||
return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro)
|
||||
@ -3935,7 +4040,7 @@ func WaitForKubeletUp(host string) error {
|
||||
func RestartApiserver(cs clientset.Interface) error {
|
||||
// TODO: Make it work for all providers.
|
||||
if !ProviderIs("gce", "gke", "aws") {
|
||||
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
|
||||
return fmt.Errorf("unsupported provider for RestartApiserver: %s", TestContext.Provider)
|
||||
}
|
||||
if ProviderIs("gce", "aws") {
|
||||
initialRestartCount, err := getApiserverRestartCount(cs)
|
||||
@ -3958,7 +4063,7 @@ func RestartApiserver(cs clientset.Interface) error {
|
||||
|
||||
func sshRestartMaster() error {
|
||||
if !ProviderIs("gce", "aws") {
|
||||
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
|
||||
return fmt.Errorf("unsupported provider for sshRestartMaster: %s", TestContext.Provider)
|
||||
}
|
||||
var command string
|
||||
if ProviderIs("gce") {
|
||||
@ -4024,7 +4129,7 @@ func getApiserverRestartCount(c clientset.Interface) (int32, error) {
|
||||
func RestartControllerManager() error {
|
||||
// TODO: Make it work for all providers and distros.
|
||||
if !ProviderIs("gce", "aws") {
|
||||
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
|
||||
return fmt.Errorf("unsupported provider for RestartControllerManager: %s", TestContext.Provider)
|
||||
}
|
||||
if ProviderIs("gce") && !MasterOSDistroIs("gci") {
|
||||
return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
|
||||
@ -4119,7 +4224,9 @@ func CheckNodesReady(c clientset.Interface, size int, timeout time.Duration) ([]
|
||||
|
||||
// Filter out not-ready nodes.
|
||||
FilterNodes(nodes, func(node v1.Node) bool {
|
||||
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
|
||||
nodeReady := IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
|
||||
networkReady := IsNodeConditionUnset(&node, v1.NodeNetworkUnavailable) || IsNodeConditionSetAsExpected(&node, v1.NodeNetworkUnavailable, false)
|
||||
return nodeReady && networkReady
|
||||
})
|
||||
numReady := len(nodes.Items)
|
||||
|
||||
@ -4215,13 +4322,13 @@ func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
|
||||
// headersForConfig extracts any http client logic necessary for the provided
|
||||
// config.
|
||||
func headersForConfig(c *restclient.Config) (http.Header, error) {
|
||||
func headersForConfig(c *restclient.Config, url *url.URL) (http.Header, error) {
|
||||
extract := &extractRT{}
|
||||
rt, err := restclient.HTTPWrappersForConfig(c, extract)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
|
||||
if _, err := rt.RoundTrip(&http.Request{URL: url}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return extract.Header, nil
|
||||
@ -4245,7 +4352,7 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
|
||||
url.Host += ":80"
|
||||
}
|
||||
}
|
||||
headers, err := headersForConfig(config)
|
||||
headers, err := headersForConfig(config, url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load http headers: %v", err)
|
||||
}
|
||||
@ -4381,48 +4488,10 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
return string(logs), err
}

func GetGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
return gceCloud, nil
}

// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}

func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
}

return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.ComputeServices().GA
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for _, item := range list.Items {
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
return TestContext.CloudConfig.Provider.EnsureLoadBalancerResourcesDeleted(ip, portRange)
}

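// The GCE-specific polling above moves behind the provider abstraction that
// this commit introduces (see provider.go in the srcs list). A simplified
// sketch of the assumed shape of that interface, not the vendored
// definition:
type ProviderInterface interface {
	// EnsureLoadBalancerResourcesDeleted returns once the cloud resources
	// backing the given ip/portRange are gone, or reports an error.
	EnsureLoadBalancerResourcesDeleted(ip, portRange string) error
	// ... other provider hooks elided ...
}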
// The following helper functions can block/unblock network from source
@ -4492,7 +4561,7 @@ func isElementOf(podUID types.UID, pods *v1.PodList) bool {
const proxyTimeout = 2 * time.Minute

// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
func NodeProxyRequest(c clientset.Interface, node, endpoint string, port int) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
@ -4501,7 +4570,7 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Name(fmt.Sprintf("%v:%v", node, port)).
Suffix(endpoint).
Do()

@ -4529,7 +4598,7 @@ func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, err

func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
client, err := NodeProxyRequest(c, node, resource, ports.KubeletPort)
if err != nil {
return &v1.PodList{}, err
}
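// With the port now a parameter, callers are no longer tied to the kubelet
// port. An assumed example that proxies to the kubelet stats endpoint:
result, err := NodeProxyRequest(c, nodeName, "stats/summary", ports.KubeletPort)
if err != nil {
	return err
}
raw, err := result.Raw()
if err != nil {
	return err
}
Logf("kubelet stats for %s: %d bytes", nodeName, len(raw))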
@ -4798,7 +4867,8 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, _ := c.CoreV1().Nodes().List(metav1.ListOptions{})
all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
ExpectNoError(err)
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
@ -4815,7 +4885,7 @@ func ListNamespaceEvents(c clientset.Interface, ns string) error {
return err
}
for _, event := range ls.Items {
glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
klog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
}
return nil
}
@ -4842,8 +4912,8 @@ func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testut
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
numTemplates := 0
for k := range p.countToStrategy {
numTemplates += k
for _, v := range p.countToStrategy {
numTemplates += v.Count
}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
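// Why the loop above changed: countToStrategy is a slice, so the old
// "for k := range" summed slice indices (0, 1, 2, ...) instead of the
// requested node counts. A tiny illustration with assumed values:
countToStrategy := []testutils.CountToStrategy{{Count: 3}, {Count: 5}}
numTemplates := 0
for _, v := range countToStrategy {
	numTemplates += v.Count // 8 nodes requested; the old form computed 0+1 = 1
}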
@ -4854,7 +4924,7 @@ func (p *E2ETestNodePreparer) PrepareNodes() error {
sum += v.Count
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
klog.Errorf("Aborting node preparation: %v", err)
return err
}
p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy
@ -4872,7 +4942,7 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
strategy, found := p.nodeToAppliedStrategy[name]
if found {
if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
klog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
encounteredError = err
}
}
@ -4880,78 +4950,6 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
return encounteredError
}

func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}

// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
if region == "" {
// Attempt to parse region from zone if no region is given.
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
}
if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = err
}
if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)

}
if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
clusterID, err := GetClusterID(c)
if err != nil {
retErr = fmt.Errorf("%v\n%v", retErr, err)
return
}
hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName)
if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, getErr)
return
}
if hc != nil {
hcNames = append(hcNames, hc.Name)
}
if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
return
}

// IsHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
apiErr, ok := err.(*googleapi.Error)
return ok && apiErr.Code == code
}

// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {
@ -4982,24 +4980,33 @@ func getMaster(c clientset.Interface) Address {
return master
}

// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider
// which is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
// GetAllMasterAddresses returns all IP addresses on which the kubelet can reach the master.
// It may return internal and external IPs, even if we expect for
// e.g. internal IPs to be used (issue #56787), so that we can be
// sure to block the master fully during tests.
func GetAllMasterAddresses(c clientset.Interface) []string {
master := getMaster(c)

ips := sets.NewString()
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
if master.externalIP != "" {
ips.Insert(master.externalIP)
}
if master.internalIP != "" {
ips.Insert(master.internalIP)
}
case "aws":
return awsMasterIP
ips.Insert(awsMasterIP)
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
return ips.List()
}

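// Sketch of the intended use (inferred from the doc comment): block traffic
// to every returned address so the master is fully unreachable, whichever
// interface the kubelet would use. BlockNetwork is the "block/unblock
// network" helper referenced earlier; the node IP variable is assumed.
for _, masterAddress := range GetAllMasterAddresses(c) {
	BlockNetwork(nodeExternalIP, masterAddress)
}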
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
func GetNodeExternalIP(node *v1.Node) (string, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
@ -5009,9 +5016,26 @@ func GetNodeExternalIP(node *v1.Node) string {
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
return host, nil
}

// GetNodeInternalIP returns node internal IP
func GetNodeInternalIP(node *v1.Node) (string, error) {
host := ""
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
host = net.JoinHostPort(address.Address, sshPort)
break
}
}
}
if host == "" {
return "", fmt.Errorf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host, nil
}

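// With both getters returning errors instead of calling Failf, callers can
// fall back between address types. A plausible pattern (assumed, not from
// the vendored sources):
host, err := GetNodeExternalIP(node)
if err != nil {
	// No external address reported; try the internal one before giving up.
	host, err = GetNodeInternalIP(node)
}
ExpectNoError(err)
Logf("using ssh endpoint %s", host)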
// SimpleGET executes a get on the given url, returns error if non-200 returned.
@ -5076,7 +5100,7 @@ func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
@ -5093,15 +5117,6 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
return err
}

// GetAzureCloud returns azure cloud provider
func GetAzureCloud() (*azure.Cloud, error) {
cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider)
}
return cloud, nil
}

func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
@ -5243,3 +5258,17 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
}
return zones, nil
}

// WaitForNodeHasTaintOrNot waits for a taint to be added/removed from the node until timeout occurs, whichever comes first.
func WaitForNodeHasTaintOrNot(c clientset.Interface, nodeName string, taint *v1.Taint, wantTrue bool, timeout time.Duration) error {
if err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
has, err := NodeHasTaint(c, nodeName, taint)
if err != nil {
return false, fmt.Errorf("failed to check taint %s on node %s or not", taint.ToString(), nodeName)
}
return has == wantTrue, nil
}); err != nil {
return fmt.Errorf("expect node %v to have taint = %v within %v: %v", nodeName, wantTrue, timeout, err)
}
return nil
}
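// Example use of the new helper (taint values assumed): wait for a
// NoSchedule taint to appear on a node, then wait for it to be removed.
taint := &v1.Taint{Key: "example.com/e2e", Effect: v1.TaintEffectNoSchedule}
if err := WaitForNodeHasTaintOrNot(c, nodeName, taint, true, 2*time.Minute); err != nil {
	Failf("taint was never added: %v", err)
}
if err := WaitForNodeHasTaintOrNot(c, nodeName, taint, false, 2*time.Minute); err != nil {
	Failf("taint was never removed: %v", err)
}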
26
vendor/k8s.io/kubernetes/test/e2e/framework/viperconfig/BUILD
generated
vendored
Normal file
@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["viperconfig.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/viperconfig",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/github.com/spf13/viper:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
142
vendor/k8s.io/kubernetes/test/e2e/framework/viperconfig/viperconfig.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package viperconfig

import (
"flag"
"fmt"
"github.com/pkg/errors"
"path/filepath"

"github.com/spf13/viper"
)

const (
viperFileNotFound = "Unsupported Config Type \"\""
)

// ViperizeFlags checks whether a configuration file was specified, reads it, and updates
// the configuration variables accordingly. Must be called after framework.HandleFlags()
// and before framework.AfterReadingAllFlags().
//
// The logic is so that a required configuration file must be present. If empty,
// the optional configuration file is used instead, unless also empty.
//
// Files can be specified with just a base name ("e2e", matches "e2e.json/yaml/..." in
// the current directory) or with path and suffix.
func ViperizeFlags(requiredConfig, optionalConfig string) error {
viperConfig := optionalConfig
required := false
if requiredConfig != "" {
viperConfig = requiredConfig
required = true
}
if viperConfig == "" {
return nil
}
viper.SetConfigName(filepath.Base(viperConfig))
viper.AddConfigPath(filepath.Dir(viperConfig))
wrapError := func(err error) error {
if err == nil {
return nil
}
errorPrefix := fmt.Sprintf("viper config %q", viperConfig)
actualFile := viper.ConfigFileUsed()
if actualFile != "" && actualFile != viperConfig {
errorPrefix = fmt.Sprintf("%s = %q", errorPrefix, actualFile)
}
return errors.Wrap(err, errorPrefix)
}

if err := viper.ReadInConfig(); err != nil {
// If the user specified a file suffix, Viper won't
// find the file because it always appends its known set
// of file suffixes. Therefore try once more without
// suffix.
ext := filepath.Ext(viperConfig)
if ext != "" && err.Error() == viperFileNotFound {
viper.SetConfigName(filepath.Base(viperConfig[0 : len(viperConfig)-len(ext)]))
err = viper.ReadInConfig()
}
if err != nil {
// If a config was required, then parsing must
// succeed. This catches syntax errors and
// "file not found". Unfortunately error
// messages are sometimes hard to understand,
// so try to help the user a bit.
switch err.Error() {
case viperFileNotFound:
if required {
return wrapError(errors.New("not found or not using a supported file format"))
}
// Proceed without config.
return nil
default:
// Something isn't right in the file.
return wrapError(err)
}
}
}

// Update all flag values not already set with values found
// via Viper. We do this ourselves instead of calling
// something like viper.Unmarshal(&TestContext) because we
// want to support all values, regardless where they are
// stored.
return wrapError(viperUnmarshal())
}

// viperUnmarshal updates all command line flags with the corresponding values found
// via Viper, regardless whether the flag value is stored in TestContext, some other
// context or a local variable.
func viperUnmarshal() error {
var result error
set := make(map[string]bool)

// Determine which values were already set explicitly via
// flags. Those we don't overwrite because command line
// flags have a higher priority.
flag.Visit(func(f *flag.Flag) {
set[f.Name] = true
})

flag.VisitAll(func(f *flag.Flag) {
if result != nil ||
set[f.Name] ||
!viper.IsSet(f.Name) {
return
}

// In contrast to viper.Unmarshal(), values
// that have the wrong type (for example, a
// list instead of a plain string) will not
// trigger an error here. This could be fixed
// by checking the type ourselves, but
// probably isn't worth the effort.
//
// "%v" correctly turns bool, int, strings into
// the representation expected by flag, so those
// can be used in config files. Plain strings
// always work there, just as on the command line.
str := fmt.Sprintf("%v", viper.Get(f.Name))
if err := f.Value.Set(str); err != nil {
result = fmt.Errorf("setting option %q from config file value: %s", f.Name, err)
}
})

return result
}
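// Sketch of the call order the ViperizeFlags doc comment prescribes for a
// test binary; the flag name and the "e2e" default are assumptions:
viperConfig := flag.String("viper-config", "e2e", "base name or path of an optional Viper config file")
framework.HandleFlags()
if err := viperconfig.ViperizeFlags("", *viperConfig); err != nil {
	fmt.Fprintln(os.Stderr, err)
	os.Exit(1)
}
framework.AfterReadingAllFlags(&framework.TestContext)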
99
vendor/k8s.io/kubernetes/test/e2e/framework/volume_util.go
generated
vendored
@ -41,16 +41,17 @@ package framework

import (
"fmt"

"strconv"
"time"

"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
@ -66,7 +67,7 @@ const (
TiB int64 = 1024 * GiB

// Waiting period for volume server (Ceph, ...) to initialize itself.
VolumeServerPodStartupSleep = 20 * time.Second
VolumeServerPodStartupTimeout = 3 * time.Minute

// Waiting period for pod to be cleaned up and unmount its volumes so we
// don't tear down containers with NFS/Ceph/Gluster server too early.
@ -92,6 +93,8 @@ type VolumeTestConfig struct {
// map <host (source) path> -> <container (dst.) path>
// if <host (source) path> is empty, mount a tmpfs emptydir
ServerVolumes map[string]string
// Message to wait for before starting clients
ServerReadyMessage string
// Wait for the pod to terminate successfully
// False indicates that the pod is long running
WaitForCompletion bool
@ -114,11 +117,12 @@ type VolumeTest struct {
// NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
config = VolumeTestConfig{
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
Namespace: namespace,
Prefix: "nfs",
ServerImage: imageutils.GetE2EImage(imageutils.VolumeNFSServer),
ServerPorts: []int{2049},
ServerVolumes: map[string]string{"": "/exports"},
ServerReadyMessage: "NFS started",
}
if len(args) > 0 {
config.ServerArgs = args
@ -180,6 +184,7 @@ func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTest
// iSCSI container needs to insert modules from the host
"/lib/modules": "/lib/modules",
},
ServerReadyMessage: "Configuration restored from /etc/target/saveconfig.json",
}
pod, ip = CreateStorageServer(cs, config)
return config, pod, ip
@ -195,16 +200,9 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestCo
ServerVolumes: map[string]string{
"/lib/modules": "/lib/modules",
},
ServerReadyMessage: "Ceph is ready",
}
pod, ip = CreateStorageServer(cs, config)

// Ceph server container needs some time to start. Tests continue working if
// this sleep is removed, however kubelet logs (and kubectl describe
// <client pod>) would be cluttered with error messages about non-existing
// image.
Logf("sleeping a bit to give ceph server time to initialize")
time.Sleep(VolumeServerPodStartupSleep)

// create secrets for the server
secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
@ -350,40 +348,52 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.
ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
if config.ServerReadyMessage != "" {
_, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
}
return pod
}

// Wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
CleanUpVolumeServerWithSecret(f, serverPod, nil)
}

// Wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
cs := f.ClientSet
ns := f.Namespace

if secret != nil {
Logf("Deleting server secret %q...", secret.Name)
err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{})
if err != nil {
Logf("Delete secret failed: %v", err)
}
}

Logf("Deleting server pod %q...", serverPod.Name)
err := DeletePodWithWait(f, cs, serverPod)
if err != nil {
Logf("Server pod delete failed: %v", err)
}
}

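// A plausible end-to-end pattern for the server/cleanup helpers above
// (the client, namespace and framework variable names are assumptions):
// start an NFS server, guarantee teardown, then point a client pod at the
// returned IP.
config, serverPod, serverIP := NewNFSServer(cs, ns.Name, nil)
defer CleanUpVolumeServer(f, serverPod)
Logf("NFS server %q ready at %s", serverPod.Name, serverIP)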
// Clean both server and client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.Prefix))

defer GinkgoRecover()

client := f.ClientSet
podClient := client.CoreV1().Pods(config.Namespace)
cs := f.ClientSet

err := podClient.Delete(config.Prefix+"-client", nil)
if err != nil {
// Log the error before failing test: if the test has already failed,
// framework.ExpectNoError() won't print anything to logs!
glog.Warningf("Failed to delete client pod: %v", err)
ExpectNoError(err, "Failed to delete client pod: %v", err)
}
err := DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace)
Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace)

if config.ServerImage != "" {
if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) {
ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so kubelet can unmount and detach the volume")
time.Sleep(PodCleanupTimeout)

err = podClient.Delete(config.Prefix+"-server", nil)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
ExpectNoError(err, "Failed to delete server pod: %v", err)
}
err := DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace)
Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
}

@ -435,9 +445,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)

if fsGroup != nil {
clientPod.Spec.SecurityContext.FSGroup = fsGroup
}
clientPod.Spec.SecurityContext.FSGroup = fsGroup

for i, test := range tests {
volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
@ -476,6 +484,8 @@
func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.Prefix, " injector"))
podClient := client.CoreV1().Pods(config.Namespace)
podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4))
volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4))

injectPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
@ -483,7 +493,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-injector",
Name: podName,
Labels: map[string]string{
"role": config.Prefix + "-injector",
},
@ -497,7 +507,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
VolumeMounts: []v1.VolumeMount{
{
Name: config.Prefix + "-volume",
Name: volMountName,
MountPath: "/mnt",
},
},
@ -511,16 +521,17 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: config.Prefix + "-volume",
Name: volMountName,
VolumeSource: volume,
},
},
NodeName: config.ClientNodeName,
NodeSelector: config.NodeSelector,
},
}

defer func() {
podClient.Delete(config.Prefix+"-injector", nil)
podClient.Delete(podName, nil)
}()

injectPod, err := podClient.Create(injectPod)