Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-14 10:53:34 +00:00)
vendor files
vendor/k8s.io/kubernetes/test/soak/cauldron/BUILD (generated, vendored, new file; 42 lines added)
@@ -0,0 +1,42 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "cauldron",
    importpath = "k8s.io/kubernetes/test/soak/cauldron",
    library = ":go_default_library",
)

go_library(
    name = "go_default_library",
    srcs = ["cauldron.go"],
    importpath = "k8s.io/kubernetes/test/soak/cauldron",
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/client/clientset_generated/internalclientset:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/soak/cauldron/Dockerfile (generated, vendored, new file; 18 lines added)
@@ -0,0 +1,18 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM busybox
ADD cauldron cauldron
ADD cauldron.go cauldron.go
ENTRYPOINT ["/cauldron"]

vendor/k8s.io/kubernetes/test/soak/cauldron/Makefile (generated, vendored, new file; 44 lines added)
@@ -0,0 +1,44 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

.PHONY: cauldron

TAG = 1.0

cauldron:
	GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -a -installsuffix cgo --ldflags '-w' cauldron.go

container: cauldron
	docker build --pull -t kubernetes/cauldron:$(TAG) .

push:
	docker push kubernetes/cauldron:$(TAG)

rc:
	kubectl create --validate -f cauldron-rc.yaml

stop:
	kubectl delete rc cauldron

get:
	kubectl get rc,pods -l app=cauldron

scale20:
	kubectl scale rc cauldron --replicas=20

scale1:
	kubectl scale rc cauldron --replicas=1

clean:
	rm -rf cauldron

vendor/k8s.io/kubernetes/test/soak/cauldron/cauldron-rc.yaml (generated, vendored, new file; 22 lines added)
@@ -0,0 +1,22 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: cauldron
  namespace: default
  labels:
    app: cauldron
spec:
  replicas: 1
  selector:
    app: cauldron
  template:
    metadata:
      labels:
        app: cauldron
    spec:
      containers:
      - name: cauldron
        image: kubernetes/cauldron:1.0
        imagePullPolicy: Always
        args: ["--up_to=-1"]
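
Taken together, the Makefile and the replication controller manifest above suggest the following workflow for building the image and running cauldron in-cluster. This is only a sketch: it assumes a local Docker daemon, push access to the kubernetes/cauldron image repository, and a kubectl context pointing at the target cluster.

    make cauldron     # static linux/amd64 build of cauldron.go
    make container    # docker build --pull -t kubernetes/cauldron:1.0 .
    make push         # docker push kubernetes/cauldron:1.0
    make rc           # kubectl create --validate -f cauldron-rc.yaml
    make get          # kubectl get rc,pods -l app=cauldron
    make scale20      # scale the replication controller to 20 replicas
    make stop         # kubectl delete rc cauldron

Because the replication controller passes --up_to=-1, the test loops indefinitely until the controller is deleted with make stop.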

vendor/k8s.io/kubernetes/test/soak/cauldron/cauldron.go (generated, vendored, new file; 311 lines added)
@@ -0,0 +1,311 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
This soak test places a specified number of pods on each node and then
repeatedly sends queries to a service running on these pods via
a service.
*/

package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	restclient "k8s.io/client-go/rest"
	api "k8s.io/kubernetes/pkg/apis/core"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

var (
	queriesAverage = flag.Int("queries", 100, "Number of hostname queries to make in each iteration per pod on average")
	podsPerNode    = flag.Int("pods_per_node", 1, "Number of serve_hostname pods per node")
	upTo           = flag.Int("up_to", 1, "Number of iterations or -1 for no limit")
	maxPar         = flag.Int("max_in_flight", 100, "Maximum number of queries in flight")
)

const (
	deleteTimeout          = 2 * time.Minute
	endpointTimeout        = 5 * time.Minute
	nodeListTimeout        = 2 * time.Minute
	podCreateTimeout       = 2 * time.Minute
	podStartTimeout        = 30 * time.Minute
	serviceCreateTimeout   = 2 * time.Minute
	namespaceDeleteTimeout = 5 * time.Minute
)

func main() {
	flag.Parse()

	glog.Infof("Starting cauldron soak test with queries=%d podsPerNode=%d upTo=%d maxPar=%d",
		*queriesAverage, *podsPerNode, *upTo, *maxPar)

	cc, err := restclient.InClusterConfig()
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	client, err := clientset.NewForConfig(cc)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *api.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = client.Core().Nodes().List(metav1.ListOptions{})
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create a uniquely named namespace.
	got, err := client.Core().Namespaces().Create(&api.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := client.Core().Namespaces().Delete(ns, nil); err != nil {
			glog.Warningf("Failed to delete namespace %s: %v", ns, err)
		} else {
			// Wait until the namespace disappears.
			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
				if _, err := client.Core().Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
					if errors.IsNotFound(err) {
						return
					}
				}
				time.Sleep(time.Second)
			}
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *api.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = client.Core().Services(ns).Create(&api.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: api.ServiceSpec{
				Ports: []api.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: intstr.FromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err)
		return
	}
	// Clean up service.
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := client.Core().Services(ns).Delete(svc.Name, nil); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts.
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = client.Core().Pods(ns).Create(&api.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: api.PodSpec{
						Containers: []api.Container{
							{
								Name:  "serve-hostname",
								Image: framework.ServeHostnameImage,
								Ports: []api.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods.
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = client.Core().Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *api.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = client.Core().Pods(ns).Get(podName, metav1.GetOptions{})
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, podStartTimeout, err)
				continue
			}
			if pod.Status.Phase == api.PodRunning {
				break
			}
		}
		if pod.Status.Phase != api.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		_, err = http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns))
		if err == nil {
			break
		}
		glog.Infof("After %v while making a request got error %v", time.Since(start), err)
	}
	if err != nil {
		glog.Errorf("Failed to get a response from service: %v", err)
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				resp, err := http.Get(fmt.Sprintf("http://serve-hostnames.%s:9376", ns))
				glog.V(4).Infof("Call to serve-hostnames in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					defer resp.Body.Close()
					hostname, err := ioutil.ReadAll(resp.Body)
					if err != nil {
						responseChan <- fmt.Sprintf("!failed in iteration %d to read body of response: %v", i, err)
					} else {
						responseChan <- string(hostname)
					}
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}

vendor/k8s.io/kubernetes/test/soak/serve_hostnames/BUILD (generated, vendored, new file; 45 lines added)
@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "serve_hostnames",
    importpath = "k8s.io/kubernetes/test/soak/serve_hostnames",
    library = ":go_default_library",
)

go_library(
    name = "go_default_library",
    srcs = ["serve_hostnames.go"],
    importpath = "k8s.io/kubernetes/test/soak/serve_hostnames",
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/soak/serve_hostnames/Makefile (generated, vendored, new file; 19 lines added)
@@ -0,0 +1,19 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

all:
	go build serve_hostnames.go

clean:
	rm -rf serve_hostnames

vendor/k8s.io/kubernetes/test/soak/serve_hostnames/README.md (generated, vendored, new file; 171 lines added)
@@ -0,0 +1,171 @@
# Soak Test serve_hostnames

This directory contains the source for a soak test `serve_hostnames` which performs the following actions when used with the GCE provider:

* A connection is established to the master of the cluster identified from the current context set in `$HOME/.kube/.kubeconfig`.
* The nodes available on the cluster are enumerated (say *N* nodes).
* On each node, *M* pods are created (by default 1). The pod encapsulates the `serve_hostnames` image which simply returns the name of the pod in response to a `GET` request.
  The pods are created individually (i.e. not with a replication controller).
* A service is created which maps to these pods.
* The program makes *I* iterations (default 1) where it issues *QxNxM* queries (*Q* default is 10) via the service proxy interface at the master. For example, with *N*=4 nodes, *M*=1 pod per node and the default *Q*=10, each iteration issues 40 queries, as in the sample output below.
* The program verifies that every pod (and thus every node) responded to at least one query (the average should be about *Q*).
* The time taken to perform various operations is reported and some operations are re-tried if they fail.

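To build and run the test by hand, a minimal sequence looks like the following. This is only a sketch: it assumes a Go toolchain on the workstation and that the current kubeconfig context points at the cluster under test; the flags are those defined in `serve_hostnames.go`, and `--gke_context` can be used instead to target a GKE cluster.

```
$ go build serve_hostnames.go
$ ./serve_hostnames --up_to=1 --pods_per_node=1
```
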
Here is some representative output.
```
$ ./serve_hostnames
I0326 14:21:04.179893 11434 serve_hostnames.go:60] Starting serve_hostnames soak test with queries=10 and podsPerNode=1 upTo=1
I0326 14:21:04.507252 11434 serve_hostnames.go:85] Nodes found on this cluster:
I0326 14:21:04.507282 11434 serve_hostnames.go:87] 0: kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:21:04.507297 11434 serve_hostnames.go:87] 1: kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:21:04.507309 11434 serve_hostnames.go:87] 2: kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:21:04.507320 11434 serve_hostnames.go:87] 3: kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:21:04.507347 11434 serve_hostnames.go:95] Using namespace serve-hostnames-8145 for this test.
I0326 14:21:04.507363 11434 serve_hostnames.go:98] Creating service serve-hostnames-8145/serve-hostnames
I0326 14:21:04.559849 11434 serve_hostnames.go:148] Creating pod serve-hostnames-8145/serve-hostname-0-0 on node kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:21:04.605603 11434 serve_hostnames.go:148] Creating pod serve-hostnames-8145/serve-hostname-1-0 on node kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:21:04.662099 11434 serve_hostnames.go:148] Creating pod serve-hostnames-8145/serve-hostname-2-0 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:21:04.707179 11434 serve_hostnames.go:148] Creating pod serve-hostnames-8145/serve-hostname-3-0 on node kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:21:04.757646 11434 serve_hostnames.go:194] Waiting for the serve-hostname pods to be ready
I0326 14:23:31.125188 11434 serve_hostnames.go:211] serve-hostnames-8145/serve-hostname-0-0 is running
I0326 14:23:31.165984 11434 serve_hostnames.go:211] serve-hostnames-8145/serve-hostname-1-0 is running
I0326 14:25:22.213751 11434 serve_hostnames.go:211] serve-hostnames-8145/serve-hostname-2-0 is running
I0326 14:25:37.387257 11434 serve_hostnames.go:211] serve-hostnames-8145/serve-hostname-3-0 is running
W0326 14:25:39.243813 11434 serve_hostnames.go:265] No response from pod serve-hostname-3-0 on node kubernetes-node-jay1.c.kubernetes-satnam.internal at iteration 0
I0326 14:25:39.243844 11434 serve_hostnames.go:269] Iteration 0 took 1.814483599s for 40 queries (22.04 QPS)
I0326 14:25:39.243871 11434 serve_hostnames.go:182] Cleaning up pods
I0326 14:25:39.434619 11434 serve_hostnames.go:130] Cleaning up service serve-hostnames-8145/server-hostnames
```

The pods are named with -*N*-*M* suffixes which identify the number of the node *N* and the number of the pod *M* on that node.
Notice that in this run the pod (number 0) running on node 3 did not respond to any queries.

The number of iterations to perform for issuing queries can be changed from the default of 1 to some higher value, e.g. `--up_to=3`, and the number of pods per node can also be changed, e.g. `--pods_per_node=2`:

```
$ ./serve_hostnames --up_to=3 --pods_per_node=2
I0326 14:27:27.584378 11808 serve_hostnames.go:60] Starting serve_hostnames soak test with queries=10 and podsPerNode=2 upTo=3
I0326 14:27:27.913713 11808 serve_hostnames.go:85] Nodes found on this cluster:
I0326 14:27:27.913774 11808 serve_hostnames.go:87] 0: kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:27:27.913800 11808 serve_hostnames.go:87] 1: kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:27:27.913825 11808 serve_hostnames.go:87] 2: kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:27:27.913846 11808 serve_hostnames.go:87] 3: kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:27:27.913904 11808 serve_hostnames.go:95] Using namespace serve-hostnames-4997 for this test.
I0326 14:27:27.913931 11808 serve_hostnames.go:98] Creating service serve-hostnames-4997/serve-hostnames
I0326 14:27:27.969083 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-0-0 on node kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:27:28.020133 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-0-1 on node kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:27:28.070054 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-1-0 on node kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:27:28.118641 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-1-1 on node kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:27:28.168786 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-2-0 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:27:28.214730 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-2-1 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:27:28.261685 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-3-0 on node kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:27:28.320224 11808 serve_hostnames.go:148] Creating pod serve-hostnames-4997/serve-hostname-3-1 on node kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:27:28.387007 11808 serve_hostnames.go:194] Waiting for the serve-hostname pods to be ready
I0326 14:28:28.969149 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-0-0 is running
I0326 14:28:29.010376 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-0-1 is running
I0326 14:28:29.050463 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-1-0 is running
I0326 14:28:29.091164 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-1-1 is running
I0326 14:30:00.850461 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-2-0 is running
I0326 14:30:00.891559 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-2-1 is running
I0326 14:30:00.932829 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-3-0 is running
I0326 14:30:00.973941 11808 serve_hostnames.go:211] serve-hostnames-4997/serve-hostname-3-1 is running
W0326 14:30:04.726582 11808 serve_hostnames.go:265] No response from pod serve-hostname-2-0 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal at iteration 0
W0326 14:30:04.726658 11808 serve_hostnames.go:265] No response from pod serve-hostname-2-1 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal at iteration 0
I0326 14:30:04.726696 11808 serve_hostnames.go:269] Iteration 0 took 3.711080213s for 80 queries (21.56 QPS)
W0326 14:30:08.267297 11808 serve_hostnames.go:265] No response from pod serve-hostname-2-0 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal at iteration 1
W0326 14:30:08.267365 11808 serve_hostnames.go:265] No response from pod serve-hostname-2-1 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal at iteration 1
I0326 14:30:08.267404 11808 serve_hostnames.go:269] Iteration 1 took 3.540635303s for 80 queries (22.59 QPS)
I0326 14:30:11.971349 11808 serve_hostnames.go:269] Iteration 2 took 3.703884372s for 80 queries (21.60 QPS)
I0326 14:30:11.971425 11808 serve_hostnames.go:182] Cleaning up pods
I0326 14:30:12.382932 11808 serve_hostnames.go:130] Cleaning up service serve-hostnames-4997/server-hostnames
```

Notice here that for the first two iterations neither of the pods on node 2 responded but by the third iteration responses were received from all nodes.

For a soak test use `--up_to=-1` which will loop indefinitely.

Note that this is not designed to be a performance test. The goal for this program is to provide an easy way to have a soak test
that can run indefinitely and exercise enough of Kubernetes' functionality to be confident that the cluster is still up and healthy.
The reported QPS mainly indicates latency to the master since the proxy requests are issued (deliberately) in a serial manner.

A more detailed report can be produced with `--v=4` which measures the time taken to perform various operations
and also reports the distribution of responses received from the pods. In the example below
we see that the pod on node 0 returned 18 responses, the pod on node 1 returned 10 responses, the
pod on node 3 returned 12 responses and the pod on node 2 did not respond at all.
```
$ ./serve_hostnames --v=4
I0326 14:33:26.020917 12099 serve_hostnames.go:60] Starting serve_hostnames soak test with queries=10 and podsPerNode=1 upTo=1
I0326 14:33:26.365201 12099 serve_hostnames.go:85] Nodes found on this cluster:
I0326 14:33:26.365260 12099 serve_hostnames.go:87] 0: kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:33:26.365288 12099 serve_hostnames.go:87] 1: kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:33:26.365313 12099 serve_hostnames.go:87] 2: kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:33:26.365334 12099 serve_hostnames.go:87] 3: kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:33:26.365392 12099 serve_hostnames.go:95] Using namespace serve-hostnames-1631 for this test.
I0326 14:33:26.365419 12099 serve_hostnames.go:98] Creating service serve-hostnames-1631/serve-hostnames
I0326 14:33:26.423927 12099 serve_hostnames.go:118] Service create serve-hostnames-1631/server-hostnames took 58.473361ms
I0326 14:33:26.423981 12099 serve_hostnames.go:148] Creating pod serve-hostnames-1631/serve-hostname-0-0 on node kubernetes-node-5h4m.c.kubernetes-satnam.internal
I0326 14:33:26.480185 12099 serve_hostnames.go:168] Pod create serve-hostnames-1631/serve-hostname-0-0 request took 56.178906ms
I0326 14:33:26.480271 12099 serve_hostnames.go:148] Creating pod serve-hostnames-1631/serve-hostname-1-0 on node kubernetes-node-9i4n.c.kubernetes-satnam.internal
I0326 14:33:26.534300 12099 serve_hostnames.go:168] Pod create serve-hostnames-1631/serve-hostname-1-0 request took 53.981761ms
I0326 14:33:26.534396 12099 serve_hostnames.go:148] Creating pod serve-hostnames-1631/serve-hostname-2-0 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal
I0326 14:33:26.590188 12099 serve_hostnames.go:168] Pod create serve-hostnames-1631/serve-hostname-2-0 request took 55.752115ms
I0326 14:33:26.590222 12099 serve_hostnames.go:148] Creating pod serve-hostnames-1631/serve-hostname-3-0 on node kubernetes-node-jay1.c.kubernetes-satnam.internal
I0326 14:33:26.650024 12099 serve_hostnames.go:168] Pod create serve-hostnames-1631/serve-hostname-3-0 request took 59.781614ms
I0326 14:33:26.650083 12099 serve_hostnames.go:194] Waiting for the serve-hostname pods to be ready
I0326 14:33:32.776651 12099 serve_hostnames.go:211] serve-hostnames-1631/serve-hostname-0-0 is running
I0326 14:33:32.822324 12099 serve_hostnames.go:211] serve-hostnames-1631/serve-hostname-1-0 is running
I0326 14:35:03.741235 12099 serve_hostnames.go:211] serve-hostnames-1631/serve-hostname-2-0 is running
I0326 14:35:03.786411 12099 serve_hostnames.go:211] serve-hostnames-1631/serve-hostname-3-0 is running
I0326 14:35:03.878030 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 45.656425ms
I0326 14:35:03.923999 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 45.887564ms
I0326 14:35:03.967731 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.7004ms
I0326 14:35:04.011077 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.318018ms
I0326 14:35:04.054958 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.843043ms
I0326 14:35:04.099051 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.030505ms
I0326 14:35:04.143197 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.069434ms
I0326 14:35:04.186800 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.530301ms
I0326 14:35:04.230492 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.658239ms
I0326 14:35:04.274337 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.800072ms
I0326 14:35:04.317801 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.379729ms
I0326 14:35:04.362778 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.897882ms
I0326 14:35:04.406845 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.976645ms
I0326 14:35:04.450513 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.613496ms
I0326 14:35:04.494369 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.777934ms
I0326 14:35:04.538399 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.945502ms
I0326 14:35:04.583760 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 45.285171ms
I0326 14:35:04.637430 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 53.629532ms
I0326 14:35:04.681389 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.918124ms
I0326 14:35:04.725401 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.964965ms
I0326 14:35:04.769218 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.734827ms
I0326 14:35:04.812660 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.376494ms
I0326 14:35:04.857974 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 45.246004ms
I0326 14:35:04.901706 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.668478ms
I0326 14:35:04.945372 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.642202ms
I0326 14:35:04.989023 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.619706ms
I0326 14:35:05.033153 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.087168ms
I0326 14:35:05.077038 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.791991ms
I0326 14:35:05.124299 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 47.214038ms
I0326 14:35:05.168162 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.795225ms
I0326 14:35:05.211687 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.48304ms
I0326 14:35:05.255553 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.799647ms
I0326 14:35:05.299352 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.72493ms
I0326 14:35:05.342916 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.509589ms
I0326 14:35:05.386952 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.947881ms
I0326 14:35:05.431467 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.442041ms
I0326 14:35:05.475834 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.304759ms
I0326 14:35:05.519373 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.501574ms
I0326 14:35:05.563584 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 44.162687ms
I0326 14:35:05.607126 12099 serve_hostnames.go:249] Proxy call in namespace serve-hostnames-1631 took 43.478674ms
I0326 14:35:05.607164 12099 serve_hostnames.go:258] serve-hostname-3-0: 12
I0326 14:35:05.607176 12099 serve_hostnames.go:258] serve-hostname-1-0: 10
I0326 14:35:05.607186 12099 serve_hostnames.go:258] serve-hostname-0-0: 18
W0326 14:35:05.607199 12099 serve_hostnames.go:265] No response from pod serve-hostname-2-0 on node kubernetes-node-d0yo.c.kubernetes-satnam.internal at iteration 0
I0326 14:35:05.607211 12099 serve_hostnames.go:269] Iteration 0 took 1.774856469s for 40 queries (22.54 QPS)
I0326 14:35:05.607236 12099 serve_hostnames.go:182] Cleaning up pods
I0326 14:35:05.797893 12099 serve_hostnames.go:130] Cleaning up service serve-hostnames-1631/server-hostnames
```

vendor/k8s.io/kubernetes/test/soak/serve_hostnames/serve_hostnames.go (generated, vendored, new file; 346 lines added)
@@ -0,0 +1,346 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
This soak test places a specified number of pods on each node and then
repeatedly sends queries to a service running on these pods via
a service.
*/

package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/intstr"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	e2e "k8s.io/kubernetes/test/e2e/framework"

	"github.com/golang/glog"
)

var (
	queriesAverage = flag.Int("queries", 100, "Number of hostname queries to make in each iteration per pod on average")
	podsPerNode    = flag.Int("pods_per_node", 1, "Number of serve_hostname pods per node")
	upTo           = flag.Int("up_to", 1, "Number of iterations or -1 for no limit")
	maxPar         = flag.Int("max_par", 500, "Maximum number of queries in flight")
	gke            = flag.String("gke_context", "", "Target GKE cluster with context gke_{project}_{zone}_{cluster-name}")
)

const (
	deleteTimeout          = 2 * time.Minute
	endpointTimeout        = 5 * time.Minute
	nodeListTimeout        = 2 * time.Minute
	podCreateTimeout       = 2 * time.Minute
	podStartTimeout        = 30 * time.Minute
	serviceCreateTimeout   = 2 * time.Minute
	namespaceDeleteTimeout = 5 * time.Minute
)

func main() {
	flag.Parse()

	glog.Infof("Starting serve_hostnames soak test with queries=%d and podsPerNode=%d upTo=%d",
		*queriesAverage, *podsPerNode, *upTo)

	var spec string
	if *gke != "" {
		spec = filepath.Join(os.Getenv("HOME"), ".config", "gcloud", "kubernetes", "kubeconfig")
	} else {
		spec = filepath.Join(os.Getenv("HOME"), ".kube", "config")
	}
	settings, err := clientcmd.LoadFromFile(spec)
	if err != nil {
		glog.Fatalf("Error loading configuration: %v", err.Error())
	}
	if *gke != "" {
		settings.CurrentContext = *gke
	}
	config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		glog.Fatalf("Failed to construct config: %v", err)
	}

	client, err := clientset.NewForConfig(config)
	if err != nil {
		glog.Fatalf("Failed to make client: %v", err)
	}

	var nodes *v1.NodeList
	for start := time.Now(); time.Since(start) < nodeListTimeout; time.Sleep(2 * time.Second) {
		nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{})
		if err == nil {
			break
		}
		glog.Warningf("Failed to list nodes: %v", err)
	}
	if err != nil {
		glog.Fatalf("Giving up trying to list nodes: %v", err)
	}

	if len(nodes.Items) == 0 {
		glog.Fatalf("Failed to find any nodes.")
	}

	glog.Infof("Found %d nodes on this cluster:", len(nodes.Items))
	for i, node := range nodes.Items {
		glog.Infof("%d: %s", i, node.Name)
	}

	queries := *queriesAverage * len(nodes.Items) * *podsPerNode

	// Create the namespace.
	got, err := client.CoreV1().Namespaces().Create(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: "serve-hostnames-"}})
	if err != nil {
		glog.Fatalf("Failed to create namespace: %v", err)
	}
	ns := got.Name
	defer func(ns string) {
		if err := client.CoreV1().Namespaces().Delete(ns, nil); err != nil {
			glog.Warningf("Failed to delete namespace %s: %v", ns, err)
		} else {
			// Wait until the namespace disappears.
			for i := 0; i < int(namespaceDeleteTimeout/time.Second); i++ {
				if _, err := client.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); err != nil {
					if errors.IsNotFound(err) {
						return
					}
				}
				time.Sleep(time.Second)
			}
		}
	}(ns)
	glog.Infof("Created namespace %s", ns)

	// Create a service for these pods.
	glog.Infof("Creating service %s/serve-hostnames", ns)
	// Make several attempts to create a service.
	var svc *v1.Service
	for start := time.Now(); time.Since(start) < serviceCreateTimeout; time.Sleep(2 * time.Second) {
		t := time.Now()
		svc, err = client.CoreV1().Services(ns).Create(&v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name: "serve-hostnames",
				Labels: map[string]string{
					"name": "serve-hostname",
				},
			},
			Spec: v1.ServiceSpec{
				Ports: []v1.ServicePort{{
					Protocol:   "TCP",
					Port:       9376,
					TargetPort: intstr.FromInt(9376),
				}},
				Selector: map[string]string{
					"name": "serve-hostname",
				},
			},
		})
		glog.V(4).Infof("Service create %s/server-hostnames took %v", ns, time.Since(t))
		if err == nil {
			break
		}
		glog.Warningf("After %v failed to create service %s/serve-hostnames: %v", time.Since(start), ns, err)
	}
	if err != nil {
		glog.Warningf("Unable to create service %s/%s: %v", ns, svc.Name, err)
		return
	}
	// Clean up service.
	defer func() {
		glog.Infof("Cleaning up service %s/serve-hostnames", ns)
		// Make several attempts to delete the service.
		for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
			if err := client.CoreV1().Services(ns).Delete(svc.Name, nil); err == nil {
				return
			}
			glog.Warningf("After %v unable to delete service %s/%s: %v", time.Since(start), ns, svc.Name, err)
		}
	}()

	// Put serve-hostname pods on each node.
	podNames := []string{}
	for i, node := range nodes.Items {
		for j := 0; j < *podsPerNode; j++ {
			podName := fmt.Sprintf("serve-hostname-%d-%d", i, j)
			podNames = append(podNames, podName)
			// Make several attempts.
			for start := time.Now(); time.Since(start) < podCreateTimeout; time.Sleep(2 * time.Second) {
				glog.Infof("Creating pod %s/%s on node %s", ns, podName, node.Name)
				t := time.Now()
				_, err = client.CoreV1().Pods(ns).Create(&v1.Pod{
					ObjectMeta: metav1.ObjectMeta{
						Name: podName,
						Labels: map[string]string{
							"name": "serve-hostname",
						},
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name:  "serve-hostname",
								Image: e2e.ServeHostnameImage,
								Ports: []v1.ContainerPort{{ContainerPort: 9376}},
							},
						},
						NodeName: node.Name,
					},
				})
				glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t))
				if err == nil {
					break
				}
				glog.Warningf("After %s failed to create pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
			if err != nil {
				glog.Warningf("Failed to create pod %s/%s: %v", ns, podName, err)
				return
			}
		}
	}
	// Clean up the pods.
	defer func() {
		glog.Info("Cleaning up pods")
		// Make several attempts to delete the pods.
		for _, podName := range podNames {
			for start := time.Now(); time.Since(start) < deleteTimeout; time.Sleep(1 * time.Second) {
				if err = client.CoreV1().Pods(ns).Delete(podName, nil); err == nil {
					break
				}
				glog.Warningf("After %v failed to delete pod %s/%s: %v", time.Since(start), ns, podName, err)
			}
		}
	}()

	glog.Info("Waiting for the serve-hostname pods to be ready")
	for _, podName := range podNames {
		var pod *v1.Pod
		for start := time.Now(); time.Since(start) < podStartTimeout; time.Sleep(5 * time.Second) {
			pod, err = client.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
			if err != nil {
				glog.Warningf("Get pod %s/%s failed, ignoring for %v: %v", ns, podName, podStartTimeout, err)
				continue
			}
			if pod.Status.Phase == v1.PodRunning {
				break
			}
		}
		if pod.Status.Phase != v1.PodRunning {
			glog.Warningf("Gave up waiting on pod %s/%s to be running (saw %v)", ns, podName, pod.Status.Phase)
		} else {
			glog.Infof("%s/%s is running", ns, podName)
		}
	}

	rclient, err := restclient.RESTClientFor(config)
	if err != nil {
		glog.Warningf("Failed to build restclient: %v", err)
		return
	}
	proxyRequest, errProxy := e2e.GetServicesProxyRequest(client, rclient.Get())
	if errProxy != nil {
		glog.Warningf("Get services proxy request failed: %v", errProxy)
		return
	}

	// Wait for the endpoints to propagate.
	for start := time.Now(); time.Since(start) < endpointTimeout; time.Sleep(10 * time.Second) {
		hostname, err := proxyRequest.
			Namespace(ns).
			Name("serve-hostnames").
			DoRaw()
		if err != nil {
			glog.Infof("After %v while making a proxy call got error %v", time.Since(start), err)
			continue
		}
		var r metav1.Status
		if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), hostname, &r); err != nil {
			break
		}
		if r.Status == metav1.StatusFailure {
			glog.Infof("After %v got status %v", time.Since(start), string(hostname))
			continue
		}
		break
	}

	// Repeatedly make requests.
	for iteration := 0; iteration != *upTo; iteration++ {
		responseChan := make(chan string, queries)
		// Use a channel of size *maxPar to throttle the number
		// of in-flight requests to avoid overloading the service.
		inFlight := make(chan struct{}, *maxPar)
		start := time.Now()
		for q := 0; q < queries; q++ {
			go func(i int, query int) {
				inFlight <- struct{}{}
				t := time.Now()
				hostname, err := proxyRequest.
					Namespace(ns).
					Name("serve-hostnames").
					DoRaw()
				glog.V(4).Infof("Proxy call in namespace %s took %v", ns, time.Since(t))
				if err != nil {
					glog.Warningf("Call failed during iteration %d query %d : %v", i, query, err)
					// If the query failed return a string which starts with a character
					// that can't be part of a hostname.
					responseChan <- fmt.Sprintf("!failed in iteration %d to issue query %d: %v", i, query, err)
				} else {
					responseChan <- string(hostname)
				}
				<-inFlight
			}(iteration, q)
		}
		responses := make(map[string]int, *podsPerNode*len(nodes.Items))
		missing := 0
		for q := 0; q < queries; q++ {
			r := <-responseChan
			glog.V(4).Infof("Got response from %s", r)
			responses[r]++
			// If the returned hostname starts with '!' then it indicates
			// an error response.
			if len(r) > 0 && r[0] == '!' {
				glog.V(3).Infof("Got response %s", r)
				missing++
			}
		}
		if missing > 0 {
			glog.Warningf("Missing %d responses out of %d", missing, queries)
		}
		// Report any nodes that did not respond.
		for n, node := range nodes.Items {
			for i := 0; i < *podsPerNode; i++ {
				name := fmt.Sprintf("serve-hostname-%d-%d", n, i)
				if _, ok := responses[name]; !ok {
					glog.Warningf("No response from pod %s on node %s at iteration %d", name, node.Name, iteration)
				}
			}
		}
		glog.Infof("Iteration %d took %v for %d queries (%.2f QPS) with %d missing",
			iteration, time.Since(start), queries-missing, float64(queries-missing)/time.Since(start).Seconds(), missing)
	}
}