vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

@@ -0,0 +1,14 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

@@ -0,0 +1,68 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}
log() {
printf "$*" >&1
}
debug() {
echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}
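# mount: create the mount path ($1), mount a tmpfs on it, and write an index.html into it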
domount() {
debug "domount $@"
MNTPATH=$1
mkdir -p ${MNTPATH} >/dev/null 2>&1
mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
log "{\"status\":\"Success\"}"
exit 0
}
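# unmount: remove the index.html and unmount the tmpfs at the mount path ($1)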
unmount() {
debug "unmount $@"
MNTPATH=$1
rm ${MNTPATH}/index.html >/dev/null 2>&1
umount ${MNTPATH} >/dev/null 2>&1
log "{\"status\":\"Success\"}"
exit 0
}
op=$1
if [ "$op" = "init" ]; then
debug "init $@"
log "{\"status\":\"Success\",\"capabilities\":{\"attach\":false}}"
exit 0
fi
shift
case "$op" in
mount)
domount $*
;;
unmount)
unmount $*
;;
*)
log "{\"status\":\"Not supported\"}"
exit 0
esac
exit 1

@@ -0,0 +1,124 @@
#!/bin/sh
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FLEX_DUMMY_LOG=${FLEX_DUMMY_LOG:-"/tmp/flex-dummy.log"}
VALID_MNTDEVICE=foo
# attach always returns one valid mount device so a different device
# showing up in a subsequent driver call implies a bug
validateMountDeviceOrDie() {
MNTDEVICE=$1
CALL=$2
if [ "$MNTDEVICE" != "$VALID_MNTDEVICE" ]; then
log "{\"status\":\"Failure\",\"message\":\"call "${CALL}" expected device "${VALID_MNTDEVICE}", got device "${MNTDEVICE}"\"}"
exit 0
fi
}
log() {
printf "$*" >&1
}
debug() {
echo "$(date) $*" >> "${FLEX_DUMMY_LOG}"
}
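# attach: always succeed and report the fixed dummy device name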
attach() {
debug "attach $@"
log "{\"status\":\"Success\",\"device\":\""${VALID_MNTDEVICE}"\"}"
exit 0
}
detach() {
debug "detach $@"
# TODO issue 44737 detach is passed PV name, not mount device
log "{\"status\":\"Success\"}"
exit 0
}
waitforattach() {
debug "waitforattach $@"
MNTDEVICE=$1
validateMountDeviceOrDie "$MNTDEVICE" "waitforattach"
log "{\"status\":\"Success\",\"device\":\""${MNTDEVICE}"\"}"
exit 0
}
isattached() {
debug "isattached $@"
log "{\"status\":\"Success\",\"attached\":true}"
exit 0
}
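# mountdevice: validate the mount device ($2), then mount a tmpfs at the mount path ($1) and write an index.html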
domountdevice() {
debug "domountdevice $@"
MNTDEVICE=$2
validateMountDeviceOrDie "$MNTDEVICE" "domountdevice"
MNTPATH=$1
mkdir -p ${MNTPATH} >/dev/null 2>&1
mount -t tmpfs none ${MNTPATH} >/dev/null 2>&1
echo "Hello from flexvolume!" >> "${MNTPATH}/index.html"
log "{\"status\":\"Success\"}"
exit 0
}
unmountdevice() {
debug "unmountdevice $@"
MNTDEVICE=$2
validateMountDeviceOrDie "$MNTDEVICE" "unmountdevice"
MNTPATH=$1
rm "${MNTPATH}/index.html" >/dev/null 2>&1
umount ${MNTPATH} >/dev/null 2>&1
log "{\"status\":\"Success\"}"
exit 0
}
op=$1
if [ "$op" = "init" ]; then
debug "init $@"
log "{\"status\":\"Success\",\"capabilities\":{\"attach\":true}}"
exit 0
fi
shift
case "$op" in
attach)
attach $*
;;
detach)
detach $*
;;
waitforattach)
waitforattach $*
;;
isattached)
isattached $*
;;
mountdevice)
domountdevice $*
;;
unmountdevice)
unmountdevice $*
;;
*)
log "{\"status\":\"Not supported\"}"
exit 0
esac
exit 1

@@ -0,0 +1,29 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: frontend
spec:
replicas: 3
template:
metadata:
labels:
app: guestbook
tier: frontend
spec:
containers:
- name: php-redis
image: {{.GBFrontendImage}}
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access environment variables to find service host
# info, comment out the 'value: dns' line above, and uncomment the
# line below:
# value: env
ports:
- containerPort: 80

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: frontend
labels:
app: guestbook
tier: frontend
spec:
# if your cluster supports it, uncomment the following to automatically create
# an external load-balanced IP for the frontend service.
# type: LoadBalancer
ports:
- port: 80
selector:
app: guestbook
tier: frontend

@@ -0,0 +1,22 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-master
spec:
replicas: 1
template:
metadata:
labels:
app: redis
role: master
tier: backend
spec:
containers:
- name: master
image: {{.RedisImage}}
resources:
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 6379

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: redis-master
labels:
app: redis
role: master
tier: backend
spec:
ports:
- port: 6379
targetPort: 6379
selector:
app: redis
role: master
tier: backend

@@ -0,0 +1,30 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: redis-slave
spec:
replicas: 2
template:
metadata:
labels:
app: redis
role: slave
tier: backend
spec:
containers:
- name: slave
image: {{.GBRedisSlaveImage}}
resources:
requests:
cpu: 100m
memory: 100Mi
env:
- name: GET_HOSTS_FROM
value: dns
# If your cluster config does not include a dns service, then to
# instead access an environment variable to find the master
# service's host, comment out the 'value: dns' line above, and
# uncomment the line below:
# value: env
ports:
- containerPort: 6379

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: redis-slave
labels:
app: redis
role: slave
tier: backend
spec:
ports:
- port: 6379
selector:
app: redis
role: slave
tier: backend

@@ -0,0 +1,25 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: echomap
spec:
rules:
- host: foo.bar.com
http:
paths:
- path: /foo
backend:
serviceName: echoheadersx
servicePort: 80
- host: bar.baz.com
http:
paths:
- path: /bar
backend:
serviceName: echoheadersy
servicePort: 80
- path: /foo
backend:
serviceName: echoheadersx
servicePort: 80

@@ -0,0 +1,24 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: echoheaders
spec:
replicas: 1
template:
metadata:
labels:
app: echoheaders
spec:
containers:
- name: echoheaders
image: gcr.io/google_containers/echoserver:1.6
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
periodSeconds: 1
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 10

@@ -0,0 +1,32 @@
apiVersion: v1
kind: Service
metadata:
name: echoheadersx
labels:
app: echoheaders
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: echoheaders
---
apiVersion: v1
kind: Service
metadata:
name: echoheadersy
labels:
app: echoheaders
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: echoheaders

@@ -0,0 +1,8 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: hostname
spec:
backend:
serviceName: hostname
servicePort: 80

@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
labels:
run: hostname
name: hostname
spec:
template:
metadata:
labels:
run: hostname
spec:
containers:
- image: gcr.io/kubernetes-e2e-test-images/serve-hostname-amd64:1.1
imagePullPolicy: IfNotPresent
name: hostname
terminationGracePeriodSeconds: 120

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: hostname
annotations:
alpha.cloud.google.com/load-balancer-neg: "true"
spec:
ports:
- port: 80
protocol: TCP
targetPort: 9376
selector:
run: hostname
sessionAffinity: None
type: NodePort

@@ -0,0 +1,50 @@
# nginx ingress controller RC
apiVersion: v1
kind: ReplicationController
metadata:
name: nginx-ingress-controller
labels:
k8s-app: nginx-ingress-lb
spec:
replicas: 1
selector:
k8s-app: nginx-ingress-lb
template:
metadata:
labels:
k8s-app: nginx-ingress-lb
name: nginx-ingress-lb
spec:
terminationGracePeriodSeconds: 0
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.1
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
name: nginx-ingress-lb
# use downward API
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- containerPort: 80
hostPort: 80
- containerPort: 443
hostPort: 443
# We expose 18080 to access nginx stats at the URL /nginx-status
# this is optional
- containerPort: 18080
hostPort: 18080
args:
- /nginx-ingress-controller
- --default-backend-service=kube-system/default-http-backend

@@ -0,0 +1,16 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: static-ip
# This annotation is added by the test upon allocating a staticip.
# annotations:
# kubernetes.io/ingress.global-static-ip-name: "staticip"
spec:
tls:
# This assumes tls-secret exists.
# To generate it, run make in this directory.
- secretName: tls-secret
backend:
serviceName: echoheaders-https
servicePort: 80

@@ -0,0 +1,16 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: echoheaders-https
spec:
replicas: 2
template:
metadata:
labels:
app: echoheaders-https
spec:
containers:
- name: echoheaders-https
image: gcr.io/google_containers/echoserver:1.6
ports:
- containerPort: 8080

@@ -0,0 +1,9 @@
apiVersion: v1
data:
tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLekNDQWhPZ0F3SUJBZ0lKQU95Nk1BZ3BYWkNYTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ3d4RkRBU0JnTlYKQkFNTUMyVjRZVzF3YkdVdVkyOXRNUlF3RWdZRFZRUUtEQXRsZUdGdGNHeGxMbU52YlRBZUZ3MHhOakExTVRjeApPREF3TURWYUZ3MHhOekExTVRjeE9EQXdNRFZhTUN3eEZEQVNCZ05WQkFNTUMyVjRZVzF3YkdVdVkyOXRNUlF3CkVnWURWUVFLREF0bGVHRnRjR3hsTG1OdmJUQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0MKZ2dFQkFLMkxGZzFnR1dMM2MyZUdhUDRIek9ncTNWSW1wcFc0VVZEOU1mUWxyU2RmWDdDRjJ1M05taXRHNnFkawpiakQ5RXFSTkpibElSZTBMcE1aL2E4Zjlvems3M2VuT0huM0Jzd1A5RTJlSjhmODhCejk1MGZPM3dXcW5qanMzCk01NWxXMkFWZ0pvVWVTT3JSalZDakp1TzhJWHFBbDdQMlZtamlvUGdFaHV0NU9tVThaS21BRTNhcGlJR3dJZm8KenZYNjAwV0ZtdGhkQ3IrMFBMU3ZQR29jay9ySDcvbWJvbGNLVHRkdm41bGE2aUY1enVpZXRWbVA2M0wzekVZUAp0UVNoMnNRSGxVbllEZnl4a1ppU2UrQmE5ZW8wRjBlNzc0MlZhQUkzUERDTDhNZ1Z5VVkybEdXZXhLbFN5TFgzCkpGWDM5NjlXSGZ3ejNjYXhybG4wUEFpNmFqVUNBd0VBQWFOUU1FNHdIUVlEVlIwT0JCWUVGR0tEbU5VMWJJaGEKMWFaTDVtYkRCV2pvWTROMU1COEdBMVVkSXdRWU1CYUFGR0tEbU5VMWJJaGExYVpMNW1iREJXam9ZNE4xTUF3RwpBMVVkRXdRRk1BTUJBZjh3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUo3WDM1UXp1andEQXJHTUorOXFuRzZECkhkYUNBQTVVb2dZdWpTYmdNa3NFU1VzZzJnMDdvRG41T2N6SFF4SmQ5eWtYVzVkRmQvelpaTzZHRUlPZnFUQ0cKOUpUQ29hcEJxZnh3eHRhaTl0dUdOamYwSXpVL2NoT3JYamowS1Y1Y2paVmRZd3F3QVVUa0VEaVE4dlF3YjVFZQprTHVXNXgwQlFXT1YwdU1wengwYU1PSkgxdmdGOWJPZGpPbyt1UkpBME95SWszYmRFcmt5MWg2QmNkcUpPUTA1CkRNLzgySEdqMCtpNGRnOGptQnlnRmpmYTk3YkczenVOTm1UVkhPK3hxbHJyZUdPQ3VmRi9CWUFFc1ZyODdlWnMKd2M1UFpJamRvekNSRlNCem9YLzlSMGtQQWI3Vms4bGpJUE9yeUYzeXR3MERiQnpKZWRMSWFyWE5QYWV3QUpNPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktZd2dnU2lBZ0VBQW9JQkFRQ3RpeFlOWUJsaTkzTm4KaG1qK0I4em9LdDFTSnFhVnVGRlEvVEgwSmEwblgxK3doZHJ0elpvclJ1cW5aRzR3L1JLa1RTVzVTRVh0QzZURwpmMnZIL2FNNU85M3B6aDU5d2JNRC9STm5pZkgvUEFjL2VkSHp0OEZxcDQ0N056T2VaVnRnRllDYUZIa2pxMFkxClFveWJqdkNGNmdKZXo5bFpvNHFENEJJYnJlVHBsUEdTcGdCTjJxWWlCc0NINk03MSt0TkZoWnJZWFFxL3REeTAKcnp4cUhKUDZ4Ky81bTZKWENrN1hiNStaV3VvaGVjN29uclZaait0eTk4eEdEN1VFb2RyRUI1VkoyQTM4c1pHWQprbnZnV3ZYcU5CZEh1KytObFdnQ056d3dpL0RJRmNsR05wUmxuc1NwVXNpMTl5UlY5L2V2VmgzOE05M0dzYTVaCjlEd0l1bW8xQWdNQkFBRUNnZ0VBSFc5MThoYld0MzZaU0huMzNQNmR0dE51YnJ5M2pMV1N0Vlg4M3hoMDRqUy8KR2tYWitIUGpMbXY4NlIrVHdTTnJ3Z3FEMTRWMnR0byt2SnhvUDZlNXc3OXZ5SFI1bjRMM1JqbnF6S2tOTHVtVApvU1NjZytZckhGZ0hPK3dGQ1Z6UHZ1Qm15N3VsUUhPUW1RQU1zV1h4VGdWL0dXM1B3L0NGVWhEemdWWmhlV3pPCmV3WTlyRFd1QXp6S1NkTWE0Rk5maWpWRllWcDE3RzUwZktPVzNaTk1yOWZjS01CSkdpdU84U1hUT0lGU0ppUFAKY1UzVVpiREJLejZOMXI5dzF1VVEralVVbDBBZ2NSOHR4Umx4YTBUUzNIUGN0TnIvK1BzYUg0ZFd5TzN1Y3RCUAo5K2lxVWh5dlBHelBUYzFuTXN4Wk9VREwreXJiNlNuVHA3L3BiYzROZ1FLQmdRRFZBOHd0K2ZITjZrS1ViV0YzCmNlOC9CMjNvakIzLytlSDJrTExONkVkSXJ2TFNPTlgzRFMvK3hxbkFlOFEzaDZadmpSSGdGKytHZkM3U2w5bS8KMGZTTWovU0VKY3BWdzBEdjB5ckU1ZzFBV25BbFpvT0E0ZWQ0N0lnb3ZsLys3ZjdGd3lRMm9lMkZJSmtzVENBSApMR1lVUUdZRFU4SkhTQXMweWwramo2NFVPUUtCZ1FEUWtEZE9pbDM0S1lQdURBaXBWTHh6bUlrRFRYS05zelJjCkxaQ1NPUUpKMTVDcjRSaUlyK2ZydXkwaEJQcjBZdmd2RDdUZHBuaUliaFlONnJRcnhXRWdLUkNiZnlTcUdmR2YKN0IwS1BNTWN6RkU2dXRBNTR2andseFA4VVZ4U1lENlBudUNTQmptOCthbVdwRkhpMkp0MzVKNjc5Y0kyc0tHUwoyMzh5WFd5ZDNRS0JnQUdMUElDY3ppYmE2czZlbUZWQVN5YWV6Q29pVWRsWUcwNHBNRktUdTJpSWRCUVgrMTBHCkNISUZTSmV2amZXRkV5eTl6Z0pjeWd5a2U4WmsrVndOam9NeVMraGxTYmtqYUNZVTFydUVtMVg3RWRNRGtqSnQKOExxTXBGUC9SVHpZeHI3eU1pSC9QSFI1andLbUxwayt0aUt4Y012WFlKSVpzSk1hWUdVVUZvUHBBb0dBU2JEcgpHY0VoK3JFUWdHZVlGOXhzeVpzM3JnY0xWcTNlN2tMYk5nOFdrK2lxb1ZCalRzaDRkWDRwTCtXR2xocngvZzdhCnBRWlF5RU85WHlWeWk1U3VBS01Cenk5WlVSRGhvdFBXWHV1aE5PZXNPOGdPRXFYenQyNXFEVmpoK2VrdnNhYzkKU2RzUlE0Z2pONnJQbEF0Y3d6dndLaEZua2ROUEE0aXlwS1VGMzdFQ2dZQURuVmt6MnFyMDRxNWtzaGRNcmI4RgpBUVJhSlhMcXBrZThvQUdsQ0pNMmVCZ1FZdjlEMWUxaXdSZFlrS0VUWmtIcXVaRDNVeVhLK1JPRU5uWjJHdkcwCmFJYlhHOTY4ZFhpZit6SzF3NmxkZWRCdGZIa1BTYTdCQ0ZCWURyaUc1NC9uTjZiWUFpem1NY2ZlWExlS0pPRG8KTHhTb1Iwek5NemZNVHFwYnhjdHZJUT09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
name: tls-secret
type: Opaque

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: echoheaders-https
labels:
app: echoheaders-https
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: echoheaders-https

@@ -0,0 +1,16 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 2
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: {{.NginxSlimNewImage}}
ports:
- containerPort: 80

@@ -0,0 +1,15 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
spec:
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: {{.NginxSlimNewImage}}
ports:
- containerPort: 80

@@ -0,0 +1,15 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
spec:
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: {{.NginxSlimImage}}
ports:
- containerPort: 80

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: pause
labels:
name: pause
spec:
containers:
- name: pause
image: {{.PauseImage}}
ports:
- containerPort: 80

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
name: nginx
spec:
containers:
- name: nginx
image: {{.NginxSlimImage}}
ports:
- containerPort: 80
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 5
timeoutSeconds: 5

@@ -0,0 +1,40 @@
{
"kind":"ReplicationController",
"apiVersion":"v1",
"metadata":{
"name":"redis-master",
"labels":{
"app":"redis",
"role":"master"
}
},
"spec":{
"replicas":1,
"selector":{
"app":"redis",
"role":"master"
},
"template":{
"metadata":{
"labels":{
"app":"redis",
"role":"master"
}
},
"spec":{
"containers":[
{
"name":"redis-master",
"image": "{{.RedisImage}}",
"ports":[
{
"name":"redis-server",
"containerPort":6379
}
]
}
]
}
}
}
}

@@ -0,0 +1,23 @@
{
"kind":"Service",
"apiVersion":"v1",
"metadata":{
"name":"redis-master",
"labels":{
"app":"redis",
"role":"master"
}
},
"spec":{
"ports": [
{
"port":6379,
"targetPort":"redis-server"
}
],
"selector":{
"app":"redis",
"role":"master"
}
}
}

@@ -0,0 +1,49 @@
kind: ReplicationController
apiVersion: v1
metadata:
name: service-loadbalancer
labels:
app: service-loadbalancer
version: v1
spec:
replicas: 1
selector:
app: service-loadbalancer
version: v1
template:
metadata:
labels:
app: service-loadbalancer
version: v1
spec:
containers:
- image: gcr.io/google_containers/servicelb:0.1
imagePullPolicy: Always
livenessProbe:
httpGet:
path: /healthz
port: 8081
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
name: haproxy
ports:
# All http services
- containerPort: 80
hostPort: 80
protocol: TCP
# nginx https
- containerPort: 443
hostPort: 8080
protocol: TCP
# mysql
- containerPort: 3306
hostPort: 3306
protocol: TCP
# haproxy stats
- containerPort: 1936
hostPort: 1936
protocol: TCP
resources: {}
args:
- --tcp-services=mysql:3306,nginxsvc:443

@@ -0,0 +1,20 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: netexec
spec:
# Assumes you have 3 nodes in your cluster.
replicas: 3
template:
metadata:
labels:
app: netexec
spec:
containers:
- name: netexec
image: gcr.io/google_containers/netexec:1.4
ports:
- containerPort: 8080
# This is to force these pods to land on different hosts.
# TODO: use the downward api and get podname instead.
hostPort: 81

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: netexec
labels:
app: netexec
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
app: netexec

@@ -0,0 +1,16 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: my-nginx
spec:
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginxhttps
image: bprashanth/nginxhttps:1.0
ports:
- containerPort: 80

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: nginxsvc
labels:
app: nginx
spec:
ports:
- port: 80
protocol: TCP
name: http
selector:
app: nginx

@@ -0,0 +1,11 @@
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: cassandra-pdb
labels:
pdb: cassandra
spec:
minAvailable: 2
selector:
matchLabels:
app: cassandra

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: cassandra
name: cassandra
spec:
clusterIP: None
ports:
- port: 9042
selector:
app: cassandra

@@ -0,0 +1,86 @@
apiVersion: "apps/v1beta1"
kind: StatefulSet
metadata:
name: cassandra
spec:
serviceName: cassandra
replicas: 3
template:
metadata:
labels:
app: cassandra
spec:
containers:
- name: cassandra
image: gcr.io/google-samples/cassandra:v12
imagePullPolicy: Always
ports:
- containerPort: 7000
name: intra-node
- containerPort: 7001
name: tls-intra-node
- containerPort: 7199
name: jmx
- containerPort: 9042
name: cql
resources:
requests:
cpu: "300m"
memory: 1Gi
securityContext:
capabilities:
add:
- IPC_LOCK
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "PID=$(pidof java) && kill $PID && while ps -p $PID > /dev/null; do sleep 1; done"]
env:
- name: MAX_HEAP_SIZE
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CASSANDRA_SEEDS
value: "cassandra-0.cassandra.$(POD_NAMESPACE).svc.cluster.local"
- name: CASSANDRA_CLUSTER_NAME
value: "K8Demo"
- name: CASSANDRA_DC
value: "DC1-K8Demo"
- name: CASSANDRA_RACK
value: "Rack1-K8Demo"
- name: CASSANDRA_AUTO_BOOTSTRAP
value: "false"
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
readinessProbe:
exec:
command:
- /bin/bash
- -c
- /ready-probe.sh
initialDelaySeconds: 15
timeoutSeconds: 5
# These volume mounts are persistent. They are like inline claims,
# but not exactly because the names need to match exactly one of
# the stateful pod volumes.
volumeMounts:
- name: cassandra-data
mountPath: /cassandra_data
# These are converted to volume claims by the controller
# and mounted at the paths mentioned above.
# Do not use these in production until you switch to an SSD GCEPersistentDisk or other SSD PD.
volumeClaimTemplates:
- metadata:
name: cassandra-data
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

@@ -0,0 +1,48 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: cassandra-test-server
spec:
replicas: 3
template:
metadata:
labels:
app: test-server
spec:
containers:
- name: test-server
image: gcr.io/google-containers/cassandra-e2e-test:0.1
imagePullPolicy: Always
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 2
periodSeconds: 2
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: tester-pdb
labels:
pdb: test-server
spec:
minAvailable: 1
selector:
matchLabels:
app: test-server
---
apiVersion: v1
kind: Service
metadata:
labels:
app: test-server
name: test-server
spec:
ports:
- port: 8080
selector:
app: test-server
type: LoadBalancer

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Service
metadata:
# This service only exists to create DNS entries for each pod in the stateful
# set such that they can resolve each other's IP addresses. It does not
# create a load-balanced ClusterIP and should not be used directly by clients
# in most circumstances.
name: cockroachdb
labels:
app: cockroachdb
annotations:
# This is needed to make the peer-finder work properly and to help avoid
# edge cases where instance 0 comes up after losing its data and needs to
# decide whether it should create a new cluster or try to join an existing
# one. If it creates a new cluster when it should have joined an existing
# one, we'd end up with two separate clusters listening at the same service
# endpoint, which would be very bad.
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
# Enable automatic monitoring of all instances when Prometheus is running in the cluster.
prometheus.io/scrape: "true"
prometheus.io/path: "_status/vars"
prometheus.io/port: "8080"
spec:
ports:
- port: 26257
targetPort: 26257
name: grpc
- port: 8080
targetPort: 8080
name: http
clusterIP: None
selector:
app: cockroachdb

@@ -0,0 +1,100 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: cockroachdb
spec:
serviceName: "cockroachdb"
replicas: 3
template:
metadata:
labels:
app: cockroachdb
spec:
# Init containers are run only once in the lifetime of a pod, before
# it is started up for the first time. Each init container has to exit
# successfully before the pod's main containers are allowed to start.
# This particular init container does a DNS lookup for other pods in
# the set to help determine whether or not a cluster already exists.
# If any other pods exist, it creates a file in the cockroach-data
# directory to pass that information along to the primary container that
# has to decide what command-line flags to use when starting CockroachDB.
# This only matters when a pod's persistent volume is empty - if it has
# data from a previous execution, that data will always be used.
initContainers:
- name: bootstrap
image: cockroachdb/cockroach-k8s-init:0.1
imagePullPolicy: IfNotPresent
args:
- "-on-start=/on-start.sh"
- "-service=cockroachdb"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: datadir
mountPath: "/cockroach/cockroach-data"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- cockroachdb
topologyKey: kubernetes.io/hostname
containers:
- name: cockroachdb
image: cockroachdb/cockroach:v1.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 26257
name: grpc
- containerPort: 8080
name: http
volumeMounts:
- name: datadir
mountPath: /cockroach/cockroach-data
command:
- "/bin/bash"
- "-ecx"
- |
# The use of qualified `hostname -f` is crucial:
# Other nodes aren't able to look up the unqualified hostname.
CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
# We only want to initialize a new cluster (by omitting the join flag)
# if we're sure that we're the first node (i.e. index 0) and that
# there aren't any other nodes running as part of the cluster that
# this is supposed to be a part of (which indicates that a cluster
# already exists and we should make sure not to create a new one).
# It's fine to run without --join on a restart if there aren't any
# other nodes.
if [ ! "$(hostname)" == "cockroachdb-0" ] || \
[ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
then
# We don't join cockroachdb in order to avoid a node attempting
# to join itself, which currently doesn't work
# (https://github.com/cockroachdb/cockroach/issues/9625).
CRARGS+=("--join" "cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb")
fi
exec /cockroach/cockroach ${CRARGS[*]}
# No pre-stop hook is required; a SIGTERM plus some time is all that's
# needed for graceful shutdown of a node.
terminationGracePeriodSeconds: 60
volumes:
- name: datadir
persistentVolumeClaim:
claimName: datadir
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
storage: 1Gi

@@ -0,0 +1,11 @@
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: etcd-pdb
labels:
pdb: etcd
spec:
minAvailable: 2
selector:
matchLabels:
app: etcd

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: etcd
labels:
app: etcd
spec:
ports:
- port: 2380
name: etcd-server
- port: 2379
name: etcd-client
clusterIP: None
selector:
app: etcd

@@ -0,0 +1,175 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: etcd
labels:
app: etcd
spec:
serviceName: etcd
replicas: 3
template:
metadata:
name: etcd
labels:
app: etcd
spec:
containers:
- name: etcd
image: gcr.io/google_containers/etcd-amd64:2.2.5
imagePullPolicy: Always
ports:
- containerPort: 2380
name: peer
- containerPort: 2379
name: client
resources:
requests:
cpu: 100m
memory: 512Mi
env:
- name: INITIAL_CLUSTER_SIZE
value: "3"
- name: SET_NAME
value: etcd
volumeMounts:
- name: datadir
mountPath: /var/run/etcd
lifecycle:
preStop:
exec:
command:
- "/bin/sh"
- "-ec"
- |
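# preStop: deregister this member from the etcd cluster before the pod shuts down.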
EPS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
done
HOSTNAME=$(hostname)
member_hash() {
etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
}
echo "Removing ${HOSTNAME} from etcd cluster"
ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
if [ $? -eq 0 ]; then
# Remove everything, otherwise the cluster will no longer scale up
rm -rf /var/run/etcd/*
fi
command:
- "/bin/sh"
- "-ec"
- |
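# Bootstrap this member: re-join with existing data, add itself as a new member when scaling up, or wait for all peers and start a new cluster.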
HOSTNAME=$(hostname)
# store member id into PVC for later member replacement
collect_member() {
while ! etcdctl member list &>/dev/null; do sleep 1; done
etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
exit 0
}
eps() {
EPS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
done
echo ${EPS}
}
member_hash() {
etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
}
# re-joining after failure?
if [ -e /var/run/etcd/default.etcd ]; then
echo "Re-joining etcd member"
member_id=$(cat /var/run/etcd/member_id)
# re-join member
ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380
exec etcd --name ${HOSTNAME} \
--listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
--data-dir /var/run/etcd/default.etcd
fi
# etcd-SET_ID
SET_ID=${HOSTNAME:5:${#HOSTNAME}}
# adding a new member to an existing cluster (assuming all initial pods are available)
if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
export ETCDCTL_ENDPOINT=$(eps)
# member already added?
MEMBER_HASH=$(member_hash)
if [ -n "${MEMBER_HASH}" ]; then
# the member hash exists but for some reason etcd failed;
# as the datadir has not been created, we can remove the member
# and retrieve a new hash
etcdctl member remove ${MEMBER_HASH}
fi
echo "Adding new member"
etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
if [ $? -ne 0 ]; then
echo "Exiting"
rm -f /var/run/etcd/new_member_envs
exit 1
fi
cat /var/run/etcd/new_member_envs
source /var/run/etcd/new_member_envs
collect_member &
exec etcd --name ${HOSTNAME} \
--listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
--data-dir /var/run/etcd/default.etcd \
--initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--initial-cluster ${ETCD_INITIAL_CLUSTER} \
--initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
fi
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
while true; do
echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
sleep 1s
done
done
PEERS=""
for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
done
collect_member &
# join member
exec etcd --name ${HOSTNAME} \
--initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
--listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
--advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
--initial-cluster-token etcd-cluster-1 \
--initial-cluster ${PEERS} \
--initial-cluster-state new \
--data-dir /var/run/etcd/default.etcd
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- "ReadWriteOnce"
resources:
requests:
# upstream recommended max is 700M
storage: 1Gi

@@ -0,0 +1,24 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: etcd-test-server
spec:
replicas: 3
template:
metadata:
labels:
app: test-server
spec:
containers:
- name: test-server
image: gcr.io/google-containers/etcd-statefulset-e2e-test:0.0
imagePullPolicy: Always
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 2
periodSeconds: 2

@@ -0,0 +1,18 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: galera
labels:
app: mysql
spec:
ports:
- port: 3306
name: mysql
# *.galera.default.svc.cluster.local
clusterIP: None
selector:
app: mysql

@@ -0,0 +1,84 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: "galera"
replicas: 3
template:
metadata:
labels:
app: mysql
spec:
initContainers:
- name: install
image: gcr.io/google_containers/galera-install:0.1
imagePullPolicy: Always
args:
- "--work-dir=/work-dir"
volumeMounts:
- name: workdir
mountPath: "/work-dir"
- name: config
mountPath: "/etc/mysql"
- name: bootstrap
image: debian:jessie
command:
- "/work-dir/peer-finder"
args:
- -on-start="/work-dir/on-start.sh"
- "-service=galera"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
volumeMounts:
- name: workdir
mountPath: "/work-dir"
- name: config
mountPath: "/etc/mysql"
containers:
- name: mysql
image: gcr.io/google_containers/mysql-galera:e2e
ports:
- containerPort: 3306
name: mysql
- containerPort: 4444
name: sst
- containerPort: 4567
name: replication
- containerPort: 4568
name: ist
args:
- --defaults-file=/etc/mysql/my-galera.cnf
- --user=root
readinessProbe:
# TODO: If docker exec is buggy just use gcr.io/google_containers/mysql-healthz:1.0
exec:
command:
- sh
- -c
- "mysql -u root -e 'show databases;'"
initialDelaySeconds: 15
timeoutSeconds: 5
successThreshold: 2
volumeMounts:
- name: datadir
mountPath: /var/lib/
- name: config
mountPath: /etc/mysql
volumes:
- name: config
emptyDir: {}
- name: workdir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

@@ -0,0 +1,13 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql
labels:
app: mysql
data:
master.cnf: |
[mysqld]
log-bin
slave.cnf: |
[mysqld]
super-read-only

@@ -0,0 +1,27 @@
apiVersion: v1
kind: Service
metadata:
name: mysql
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
clusterIP: None
selector:
app: mysql
---
apiVersion: v1
kind: Service
metadata:
name: mysql-read
labels:
app: mysql
spec:
ports:
- name: mysql
port: 3306
selector:
app: mysql
type: LoadBalancer

@@ -0,0 +1,159 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: mysql
replicas: 3
template:
metadata:
labels:
app: mysql
spec:
initContainers:
- name: init-mysql
image: mysql:5.7
command:
- bash
- "-c"
- |
set -ex
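# Generate mysql server-id from the pod's ordinal index.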
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
echo [mysqld] > /mnt/conf.d/server-id.cnf
echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
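# Copy the appropriate config from the config-map: master.cnf for ordinal 0, slave.cnf otherwise.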
if [[ $ordinal -eq 0 ]]; then
cp /mnt/config-map/master.cnf /mnt/conf.d/
else
cp /mnt/config-map/slave.cnf /mnt/conf.d/
fi
volumeMounts:
- name: conf
mountPath: /mnt/conf.d
- name: config-map
mountPath: /mnt/config-map
- name: clone-mysql
image: gcr.io/google-samples/xtrabackup:1.0
command:
- bash
- "-c"
- |
set -ex
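# Skip the clone if data already exists.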
[[ -d /var/lib/mysql/mysql ]] && exit 0
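# Skip the clone on the master (ordinal index 0).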
[[ `hostname` =~ -([0-9]+)$ ]] || exit 1
ordinal=${BASH_REMATCH[1]}
[[ $ordinal -eq 0 ]] && exit 0
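# Clone data from the previous peer, then prepare the backup for use.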
ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
xtrabackup --prepare --target-dir=/var/lib/mysql
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
containers:
- name: mysql
image: mysql:5.7.15
env:
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "1"
ports:
- name: mysql
containerPort: 3306
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
resources:
requests:
cpu: 1
memory: 1Gi
livenessProbe:
exec:
command: ["mysqladmin", "ping"]
initialDelaySeconds: 30
timeoutSeconds: 5
readinessProbe:
exec:
command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
initialDelaySeconds: 5
timeoutSeconds: 1
- name: xtrabackup
image: gcr.io/google-samples/xtrabackup:1.0
ports:
- name: xtrabackup
containerPort: 3307
command:
- bash
- "-c"
- |
set -ex
cd /var/lib/mysql
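# Determine the binlog position of the cloned data, if any.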
if [[ -f xtrabackup_slave_info ]]; then
mv xtrabackup_slave_info change_master_to.sql.in
rm -f xtrabackup_binlog_info
elif [[ -f xtrabackup_binlog_info ]]; then
[[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
rm xtrabackup_binlog_info
echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
fi
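# If a clone position was recorded, complete it by starting replication from the master (mysql-0.mysql).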
if [[ -f change_master_to.sql.in ]]; then
echo "Waiting for mysqld to be ready (accepting connections)"
until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
echo "Initializing replication from clone position"
mv change_master_to.sql.in change_master_to.sql.orig
mysql -h 127.0.0.1 <<EOF
$(<change_master_to.sql.orig),
MASTER_HOST='mysql-0.mysql',
MASTER_USER='root',
MASTER_PASSWORD='',
MASTER_CONNECT_RETRY=10;
START SLAVE;
EOF
fi
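# Serve a streaming backup (xbstream) to peers that connect on port 3307.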
exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
"xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
volumeMounts:
- name: data
mountPath: /var/lib/mysql
subPath: mysql
- name: conf
mountPath: /etc/mysql/conf.d
resources:
requests:
cpu: 100m
memory: 100Mi
volumes:
- name: conf
emptyDir: {}
- name: config-map
configMap:
name: mysql
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: default
resources:
requests:
storage: 10Gi
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: mysql-pdb
labels:
pdb: mysql
spec:
minAvailable: 2
selector:
matchLabels:
app: mysql

@@ -0,0 +1,48 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: mysql-test-server
spec:
replicas: 3
template:
metadata:
labels:
app: test-server
spec:
containers:
- name: test-server
image: gcr.io/google-containers/mysql-e2e-test:0.1
imagePullPolicy: Always
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 2
periodSeconds: 2
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: tester-pdb
labels:
pdb: test-server
spec:
minAvailable: 1
selector:
matchLabels:
app: test-server
---
apiVersion: v1
kind: Service
metadata:
labels:
app: test-server
name: test-server
spec:
ports:
- port: 8080
selector:
app: test-server
type: LoadBalancer

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx

@@ -0,0 +1,34 @@
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
name: web
spec:
serviceName: "nginx"
replicas: 3
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: gcr.io/google_containers/nginx-slim:0.8
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
annotations:
volume.beta.kubernetes.io/storage-class: nginx-sc
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

@@ -0,0 +1,18 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: redis
labels:
app: redis
spec:
ports:
- port: 6379
name: peer
# *.redis.default.svc.cluster.local
clusterIP: None
selector:
app: redis

@@ -0,0 +1,78 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: rd
spec:
serviceName: "redis"
replicas: 3
template:
metadata:
labels:
app: redis
spec:
initContainers:
- name: install
image: gcr.io/google_containers/redis-install-3.2.0:e2e
imagePullPolicy: Always
args:
- "--install-into=/opt"
- "--work-dir=/work-dir"
volumeMounts:
- name: opt
mountPath: "/opt"
- name: workdir
mountPath: "/work-dir"
- name: bootstrap
image: debian:jessie
command:
- "/work-dir/peer-finder"
args:
- -on-start="/work-dir/on-start.sh"
- "-service=redis"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
volumeMounts:
- name: opt
mountPath: "/opt"
- name: workdir
mountPath: "/work-dir"
containers:
- name: redis
image: debian:jessie
ports:
- containerPort: 6379
name: peer
command:
- /opt/redis/redis-server
args:
- /opt/redis/redis.conf
readinessProbe:
exec:
command:
- sh
- -c
- "/opt/redis/redis-cli -h $(hostname) ping"
initialDelaySeconds: 15
timeoutSeconds: 5
volumeMounts:
- name: datadir
mountPath: /data
- name: opt
mountPath: /opt
volumes:
- name: opt
emptyDir: {}
- name: workdir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi

@@ -0,0 +1,20 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
name: zk
labels:
app: zk
spec:
ports:
- port: 2888
name: peer
- port: 3888
name: leader-election
# *.zk.default.svc.cluster.local
clusterIP: None
selector:
app: zk

@@ -0,0 +1,85 @@
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: zoo
spec:
serviceName: "zk"
replicas: 3
template:
metadata:
labels:
app: zk
spec:
initContainers:
- name: install
image: gcr.io/google_containers/zookeeper-install-3.5.0-alpha:e2e
imagePullPolicy: Always
args:
- "--install-into=/opt"
- "--work-dir=/work-dir"
volumeMounts:
- name: opt
mountPath: "/opt/"
- name: workdir
mountPath: "/work-dir"
- name: bootstrap
image: java:openjdk-8-jre
command:
- "/work-dir/peer-finder"
args:
- -on-start="/work-dir/on-start.sh"
- "-service=zk"
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
volumeMounts:
- name: opt
mountPath: "/opt"
- name: workdir
mountPath: "/work-dir"
- name: datadir
mountPath: "/tmp/zookeeper"
containers:
- name: zk
image: java:openjdk-8-jre
ports:
- containerPort: 2888
name: peer
- containerPort: 3888
name: leader-election
command:
- /opt/zookeeper/bin/zkServer.sh
args:
- start-foreground
readinessProbe:
exec:
command:
- sh
- -c
- "/opt/zookeeper/bin/zkCli.sh ls /"
initialDelaySeconds: 15
timeoutSeconds: 5
volumeMounts:
- name: datadir
mountPath: /tmp/zookeeper
- name: opt
mountPath: /opt
# Mount the work-dir just for debugging
- name: workdir
mountPath: /work-dir
volumes:
- name: opt
emptyDir: {}
- name: workdir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi