mirror of https://github.com/ceph/ceph-csi.git

vendor updates
vendor/k8s.io/kubernetes/test/integration/framework/BUILD (generated, vendored, 6 lines changed)
@@ -22,13 +22,8 @@ go_library(
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/api/testapi:go_default_library",
         "//pkg/apis/batch:go_default_library",
-        "//pkg/apis/core:go_default_library",
-        "//pkg/apis/policy/v1beta1:go_default_library",
-        "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/controller:go_default_library",
-        "//pkg/controller/replication:go_default_library",
         "//pkg/generated/openapi:go_default_library",
-        "//pkg/kubectl:go_default_library",
         "//pkg/kubelet/client:go_default_library",
         "//pkg/master:go_default_library",
         "//pkg/util/env:go_default_library",
@@ -66,7 +61,6 @@ go_library(
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/record:go_default_library",
     ],
 )

vendor/k8s.io/kubernetes/test/integration/framework/master_utils.go (generated, vendored, 158 lines changed)
@@ -21,9 +21,6 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"path"
-	goruntime "runtime"
-	"strconv"
-	"sync"
 	"time"

 	"github.com/go-openapi/spec"
@@ -45,7 +42,6 @@ import (
 	authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/authorization/authorizer"
-	authauthorizer "k8s.io/apiserver/pkg/authorization/authorizer"
 	"k8s.io/apiserver/pkg/authorization/authorizerfactory"
 	authorizerunion "k8s.io/apiserver/pkg/authorization/union"
 	genericapiserver "k8s.io/apiserver/pkg/server"
@@ -53,16 +49,12 @@ import (
 	serverstorage "k8s.io/apiserver/pkg/server/storage"
 	"k8s.io/apiserver/pkg/storage/storagebackend"
 	"k8s.io/client-go/informers"
-	extinformers "k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/apis/batch"
-	policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
 	"k8s.io/kubernetes/pkg/controller"
-	replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
 	"k8s.io/kubernetes/pkg/generated/openapi"
 	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
 	"k8s.io/kubernetes/pkg/master"
@@ -70,37 +62,6 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/admission/admit"
 )

-const (
-	// Timeout used in benchmarks, to eg: scale an rc
-	DefaultTimeout = 30 * time.Minute
-
-	// Rc manifest used to create pods for benchmarks.
-	// TODO: Convert this to a full path?
-	TestRCManifest = "benchmark-controller.json"
-)
-
-// MasterComponents is a control struct for all master components started via NewMasterComponents.
-// TODO: Include all master components (scheduler, nodecontroller).
-// TODO: Reconcile with integration.go, currently the master used there doesn't understand
-// how to restart cleanly, which is required for each iteration of a benchmark. The integration
-// tests also don't make it easy to isolate and turn off components at will.
-type MasterComponents struct {
-	// Raw http server in front of the master
-	ApiServer *httptest.Server
-	// Kubernetes master, contains an embedded etcd storage
-	KubeMaster *master.Master
-	// Restclient used to talk to the kubernetes master
-	ClientSet clientset.Interface
-	// Replication controller manager
-	ControllerManager *replicationcontroller.ReplicationManager
-	// CloseFn shuts down the server
-	CloseFn CloseFunc
-	// Channel for stop signals to rc manager
-	rcStopCh chan struct{}
-	// Used to stop master components individually, and via MasterComponents.Stop
-	once sync.Once
-}
-
 // Config is a struct of configuration directives for NewMasterComponents.
 type Config struct {
 	// If nil, a default is used, partially filled configs will not get populated.
@@ -113,37 +74,10 @@ type Config struct {
 	// TODO: Add configs for endpoints controller, scheduler etc
 }

-// NewMasterComponents creates, initializes and starts master components based on the given config.
-func NewMasterComponents(c *Config) *MasterComponents {
-	m, s, closeFn := startMasterOrDie(c.MasterConfig, nil, nil)
-	// TODO: Allow callers to pipe through a different master url and create a client/start components using it.
-	glog.Infof("Master %+v", s.URL)
-	// TODO: caesarxuchao: remove this client when the refactoring of client libraray is done.
-	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}, QPS: c.QPS, Burst: c.Burst})
-	rcStopCh := make(chan struct{})
-	informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
-	controllerManager := replicationcontroller.NewReplicationManager(informerFactory.Core().V1().Pods(), informerFactory.Core().V1().ReplicationControllers(), clientset, c.Burst)
-
-	// TODO: Support events once we can cleanly shutdown an event recorder.
-	controllerManager.SetEventRecorder(&record.FakeRecorder{})
-	if c.StartReplicationManager {
-		informerFactory.Start(rcStopCh)
-		go controllerManager.Run(goruntime.NumCPU(), rcStopCh)
-	}
-	return &MasterComponents{
-		ApiServer:         s,
-		KubeMaster:        m,
-		ClientSet:         clientset,
-		ControllerManager: controllerManager,
-		CloseFn:           closeFn,
-		rcStopCh:          rcStopCh,
-	}
-}
-
 // alwaysAllow always allows an action
 type alwaysAllow struct{}

-func (alwaysAllow) Authorize(requestAttributes authauthorizer.Attributes) (authorizer.Decision, string, error) {
+func (alwaysAllow) Authorize(requestAttributes authorizer.Attributes) (authorizer.Decision, string, error) {
 	return authorizer.DecisionAllow, "always allow", nil
 }

@@ -225,17 +159,17 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 	}

 	tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)
-	if masterConfig.GenericConfig.Authenticator == nil {
-		masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
+	if masterConfig.GenericConfig.Authentication.Authenticator == nil {
+		masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, authauthenticator.RequestFunc(alwaysEmpty))
 	} else {
-		masterConfig.GenericConfig.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authenticator)
+		masterConfig.GenericConfig.Authentication.Authenticator = authenticatorunion.New(tokenAuthenticator, masterConfig.GenericConfig.Authentication.Authenticator)
 	}

-	if masterConfig.GenericConfig.Authorizer != nil {
+	if masterConfig.GenericConfig.Authorization.Authorizer != nil {
 		tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
-		masterConfig.GenericConfig.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorizer)
+		masterConfig.GenericConfig.Authorization.Authorizer = authorizerunion.New(tokenAuthorizer, masterConfig.GenericConfig.Authorization.Authorizer)
 	} else {
-		masterConfig.GenericConfig.Authorizer = alwaysAllow{}
+		masterConfig.GenericConfig.Authorization.Authorizer = alwaysAllow{}
 	}

 	masterConfig.GenericConfig.LoopbackClientConfig.BearerToken = privilegedLoopbackToken
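
The authenticatorunion.New and authorizerunion.New calls above chain a token-based check in front of whatever authenticator/authorizer is already configured. A minimal, self-contained sketch of the union decision pattern those helpers rely on, using simplified stand-in types rather than the actual k8s.io/apiserver interfaces (the real implementation also aggregates errors and reasons):

package main

import "fmt"

// Simplified stand-ins for the k8s.io/apiserver authorizer types.
type Decision int

const (
	DecisionNoOpinion Decision = iota
	DecisionAllow
	DecisionDeny
)

type Authorizer interface {
	Authorize(attrs string) (Decision, string, error)
}

type AuthorizerFunc func(attrs string) (Decision, string, error)

func (f AuthorizerFunc) Authorize(attrs string) (Decision, string, error) { return f(attrs) }

// union asks each member in order: the first definite Allow or Deny wins;
// NoOpinion falls through to the next member.
type union []Authorizer

func (u union) Authorize(attrs string) (Decision, string, error) {
	for _, a := range u {
		d, reason, err := a.Authorize(attrs)
		if err == nil && d != DecisionNoOpinion {
			return d, reason, nil
		}
	}
	return DecisionNoOpinion, "no authorizer had an opinion", nil
}

func main() {
	tokenAuthorizer := AuthorizerFunc(func(attrs string) (Decision, string, error) {
		if attrs == "system:masters" {
			return DecisionAllow, "privileged group", nil
		}
		return DecisionNoOpinion, "", nil
	})
	alwaysAllow := AuthorizerFunc(func(string) (Decision, string, error) {
		return DecisionAllow, "always allow", nil
	})

	combined := union{tokenAuthorizer, alwaysAllow}
	d, reason, _ := combined.Authorize("anonymous")
	fmt.Println(d == DecisionAllow, reason) // the fallthrough member decides
}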
@@ -245,7 +179,7 @@ func startMasterOrDie(masterConfig *master.Config, incomingServer *httptest.Serv
 		glog.Fatal(err)
 	}

-	sharedInformers := extinformers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
+	sharedInformers := informers.NewSharedInformerFactory(clientset, masterConfig.GenericConfig.LoopbackClientConfig.Timeout)
 	m, err = masterConfig.Complete(sharedInformers).New(genericapiserver.EmptyDelegate)
 	if err != nil {
 		closeFn()
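
For reference, the client-go shared informer factory constructed above is typically started and consumed like this (a sketch; cs can be any kubernetes.Interface, and the 10-minute resync period is an arbitrary choice, not one this commit uses):

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startInformers shows the usual lifecycle of a shared informer factory:
// construct, register the informers you need, start, then wait for the
// initial caches to fill before using listers.
func startInformers(cs kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
	podInformer := factory.Core().V1().Pods().Informer() // registers the pods informer

	factory.Start(stopCh)                                 // runs all registered informers in goroutines
	cache.WaitForCacheSync(stopCh, podInformer.HasSynced) // block until the initial list is cached
}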
@@ -300,7 +234,10 @@ func NewMasterConfig() *master.Config {
 	// FIXME (soltysh): this GroupVersionResource override should be configurable
 	// we need to set both for the whole group and for cronjobs, separately
 	resourceEncoding.SetVersionEncoding(batch.GroupName, *testapi.Batch.GroupVersion(), schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
-	resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: "batch", Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: batch.GroupName, Resource: "cronjobs"}, schema.GroupVersion{Group: batch.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: batch.GroupName, Version: runtime.APIVersionInternal})
+	// we also need to set both for the storage group and for volumeattachments, separately
+	resourceEncoding.SetVersionEncoding(storage.GroupName, *testapi.Storage.GroupVersion(), schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})
+	resourceEncoding.SetResourceEncoding(schema.GroupResource{Group: storage.GroupName, Resource: "volumeattachments"}, schema.GroupVersion{Group: storage.GroupName, Version: "v1beta1"}, schema.GroupVersion{Group: storage.GroupName, Version: runtime.APIVersionInternal})

 	storageFactory := serverstorage.NewDefaultStorageFactory(etcdOptions.StorageConfig, runtime.ContentTypeJSON, ns, resourceEncoding, master.DefaultAPIResourceConfigSource(), nil)
 	storageFactory.SetSerializer(
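
The identifiers passed to those encoding overrides come from the public k8s.io/apimachinery/pkg/runtime/schema package; a small sketch of what each value denotes (runtime.APIVersionInternal is the in-memory "__internal" version):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// The cronjobs override pairs a resource with its on-disk (external)
	// version and the in-memory (internal) version.
	gr := schema.GroupResource{Group: "batch", Resource: "cronjobs"}
	external := schema.GroupVersion{Group: "batch", Version: "v1beta1"}
	internal := schema.GroupVersion{Group: "batch", Version: runtime.APIVersionInternal}
	fmt.Println(gr, external, internal)
}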
@@ -343,7 +280,7 @@ func NewMasterConfig() *master.Config {
 	genericConfig := genericapiserver.NewConfig(legacyscheme.Codecs)
 	kubeVersion := version.Get()
 	genericConfig.Version = &kubeVersion
-	genericConfig.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
+	genericConfig.Authorization.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
 	genericConfig.AdmissionControl = admit.NewAlwaysAdmit()
 	genericConfig.EnableMetrics = true

@@ -374,22 +311,6 @@ func NewIntegrationTestMasterConfig() *master.Config {
 	return masterConfig
 }

-func (m *MasterComponents) stopRCManager() {
-	close(m.rcStopCh)
-}
-
-func (m *MasterComponents) Stop(apiServer, rcManager bool) {
-	glog.Infof("Stopping master components")
-	if rcManager {
-		// Ordering matters because the apiServer will only shutdown when pending
-		// requests are done
-		m.once.Do(m.stopRCManager)
-	}
-	if apiServer {
-		m.CloseFn()
-	}
-}
-
 // CloseFunc can be called to cleanup the master
 type CloseFunc func()

@@ -406,59 +327,6 @@ func RunAMasterUsingServer(masterConfig *master.Config, s *httptest.Server, mast
 	return startMasterOrDie(masterConfig, s, masterReceiver)
 }

-// Task is a function passed to worker goroutines by RunParallel.
-// The function needs to implement its own thread safety.
-type Task func(id int) error
-
-// RunParallel spawns a goroutine per task in the given queue
-func RunParallel(task Task, numTasks, numWorkers int) {
-	start := time.Now()
-	if numWorkers <= 0 {
-		numWorkers = numTasks
-	}
-	defer func() {
-		glog.Infof("RunParallel took %v for %d tasks and %d workers", time.Since(start), numTasks, numWorkers)
-	}()
-	var wg sync.WaitGroup
-	semCh := make(chan struct{}, numWorkers)
-	wg.Add(numTasks)
-	for id := 0; id < numTasks; id++ {
-		go func(id int) {
-			semCh <- struct{}{}
-			err := task(id)
-			if err != nil {
-				glog.Fatalf("Worker failed with %v", err)
-			}
-			<-semCh
-			wg.Done()
-		}(id)
-	}
-	wg.Wait()
-	close(semCh)
-}
-
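The removed RunParallel throttled its goroutines with a buffered channel used as a counting semaphore. A standalone sketch of that pattern, with a placeholder task body standing in for real work:

package main

import (
	"fmt"
	"sync"
)

func main() {
	const numTasks, numWorkers = 10, 3

	var wg sync.WaitGroup
	sem := make(chan struct{}, numWorkers) // at most numWorkers tasks run at once

	wg.Add(numTasks)
	for id := 0; id < numTasks; id++ {
		go func(id int) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it
			fmt.Printf("task %d running\n", id) // placeholder work item
		}(id)
	}
	wg.Wait()
}
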
-// FindFreeLocalPort returns the number of an available port number on
-// the loopback interface. Useful for determining the port to launch
-// a server on. Error handling required - there is a non-zero chance
-// that the returned port number will be bound by another process
-// after this function returns.
-func FindFreeLocalPort() (int, error) {
-	l, err := net.Listen("tcp", ":0")
-	if err != nil {
-		return 0, err
-	}
-	defer l.Close()
-	_, portStr, err := net.SplitHostPort(l.Addr().String())
-	if err != nil {
-		return 0, err
-	}
-	port, err := strconv.Atoi(portStr)
-	if err != nil {
-		return 0, err
-	}
-	return port, nil
-}
-
 // SharedEtcd creates a storage config for a shared etcd instance, with a unique prefix.
 func SharedEtcd() *storagebackend.Config {
 	cfg := storagebackend.NewDefaultConfig(path.Join(uuid.New(), "registry"), nil)
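
The removed FindFreeLocalPort illustrates a common trick: bind port 0 so the kernel picks an unused port, then close the listener. A standalone sketch of the same technique (note the inherent race its comment warned about: another process may claim the port between Close and reuse):

package main

import (
	"fmt"
	"net"
)

// freePort asks the kernel for an unused TCP port on the loopback interface.
func freePort() (int, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0") // port 0 = kernel chooses
	if err != nil {
		return 0, err
	}
	defer l.Close()
	// The listener's address carries the chosen port.
	return l.Addr().(*net.TCPAddr).Port, nil
}

func main() {
	p, err := freePort()
	if err != nil {
		panic(err)
	}
	fmt.Println("got port", p) // may still be taken before we bind again
}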
vendor/k8s.io/kubernetes/test/integration/framework/perf_utils.go (generated, vendored, 9 lines changed)
@@ -73,7 +73,14 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 		},
 	}
 	for i := 0; i < numNodes; i++ {
-		if _, err := p.client.CoreV1().Nodes().Create(baseNode); err != nil {
+		var err error
+		for retry := 0; retry < retries; retry++ {
+			_, err = p.client.CoreV1().Nodes().Create(baseNode)
+			if err == nil || !e2eframework.IsRetryableAPIError(err) {
+				break
+			}
+		}
+		if err != nil {
 			glog.Fatalf("Error creating node: %v", err)
 		}
 	}

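The new loop retries node creation only for errors the e2e framework classifies as retryable, and gives up after a bounded number of attempts (`retries` is defined elsewhere in the vendored file). A generic sketch of the same pattern, with a hypothetical isRetryable predicate standing in for e2eframework.IsRetryableAPIError:

package main

import (
	"errors"
	"fmt"
)

const retries = 3 // bounded retry count, matching the shape of the loop above

var errTemporary = errors.New("temporary server error")

// isRetryable is a hypothetical stand-in for e2eframework.IsRetryableAPIError.
func isRetryable(err error) bool {
	return errors.Is(err, errTemporary)
}

func main() {
	attempts := 0
	create := func() error { // placeholder for Nodes().Create
		attempts++
		if attempts < 2 {
			return errTemporary
		}
		return nil
	}

	var err error
	for retry := 0; retry < retries; retry++ {
		err = create()
		if err == nil || !isRetryable(err) {
			break // success, or an error retrying will not fix
		}
	}
	if err != nil {
		panic(fmt.Sprintf("create failed after %d attempts: %v", retries, err))
	}
	fmt.Println("created after", attempts, "attempts")
}
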
vendor/k8s.io/kubernetes/test/integration/framework/util.go (generated, vendored, 62 lines changed)
@@ -19,29 +19,20 @@ limitations under the License.
 package framework

 import (
-	"io/ioutil"
 	"net/http/httptest"
 	"strings"
 	"testing"
-	"time"
-
-	"github.com/golang/glog"

 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/api/testapi"
-	api "k8s.io/kubernetes/pkg/apis/core"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/kubectl"
 )

 const (
-	// When these values are updated, also update cmd/kubelet/app/options/options.go
-	// A copy of these values exist in e2e/framework/util.go.
-	currentPodInfraContainerImageName    = "gcr.io/google_containers/pause"
-	currentPodInfraContainerImageVersion = "3.0"
+	// When these values are updated, also update cmd/kubelet/app/options/container_runtime.go
+	// A copy of these values exist in test/utils/image/manifest.go
+	currentPodInfraContainerImageName    = "k8s.gcr.io/pause"
+	currentPodInfraContainerImageVersion = "3.1"
 )

 // GetServerArchitecture fetches the architecture of the cluster's apiserver.
@@ -80,48 +71,3 @@ func CreateTestingNamespace(baseName string, apiserver *httptest.Server, t *test
 func DeleteTestingNamespace(ns *v1.Namespace, apiserver *httptest.Server, t *testing.T) {
 	// TODO: Remove all resources from a given namespace once we implement CreateTestingNamespace.
 }
-
-// RCFromManifest reads a .json file and returns the rc in it.
-func RCFromManifest(fileName string) *v1.ReplicationController {
-	data, err := ioutil.ReadFile(fileName)
-	if err != nil {
-		glog.Fatalf("Unexpected error reading rc manifest %v", err)
-	}
-	var controller v1.ReplicationController
-	if err := runtime.DecodeInto(testapi.Default.Codec(), data, &controller); err != nil {
-		glog.Fatalf("Unexpected error reading rc manifest %v", err)
-	}
-	return &controller
-}
-
-// StopRC stops the rc via kubectl's stop library
-func StopRC(rc *v1.ReplicationController, clientset internalclientset.Interface) error {
-	reaper, err := kubectl.ReaperFor(api.Kind("ReplicationController"), clientset)
-	if err != nil || reaper == nil {
-		return err
-	}
-	err = reaper.Stop(rc.Namespace, rc.Name, 0, nil)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// ScaleRC scales the given rc to the given replicas.
-func ScaleRC(name, ns string, replicas int32, clientset internalclientset.Interface) (*api.ReplicationController, error) {
-	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
-	if err != nil {
-		return nil, err
-	}
-	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
-	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
-	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
-	if err != nil {
-		return nil, err
-	}
-	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
-	}
-	return scaled, nil
-}
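
Callers that depended on the removed RCFromManifest can reproduce its decode step with public APIs. A rough sketch (not part of this commit) that uses the client-go scheme in place of the removed testapi codec, and returns errors instead of calling glog.Fatalf:

package main

import (
	"io/ioutil"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
)

// rcFromFile reads a .json manifest and decodes the ReplicationController in it.
func rcFromFile(path string) (*v1.ReplicationController, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var rc v1.ReplicationController
	if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion), data, &rc); err != nil {
		return nil, err
	}
	return &rc, nil
}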