vendor cleanup: remove unused, non-go and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15421 changed files with 17 additions and 4208853 deletions

@@ -1,58 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: cassandra
  # The labels will be applied automatically
  # from the labels in the pod template, if not set
  # labels:
  #   app: cassandra
spec:
  replicas: 2
  # The selector will be applied automatically
  # from the labels in the pod template, if not set.
  # selector:
  #   app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
      - command:
        - /run.sh
        resources:
          limits:
            cpu: 0.5
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: CASSANDRA_SEED_PROVIDER
          value: "io.k8s.cassandra.KubernetesSeedProvider"
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        image: gcr.io/google-samples/cassandra:v13
        name: cassandra
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        volumeMounts:
        - mountPath: /cassandra_data
          name: data
      volumes:
      - name: data
        emptyDir: {}

@@ -1,11 +0,0 @@
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: cassandra-pdb
  labels:
    pdb: cassandra
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: cassandra

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cassandra
  name: cassandra
spec:
  clusterIP: None
  ports:
  - port: 9042
  selector:
    app: cassandra

@@ -1,90 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cassandra
spec:
  serviceName: cassandra
  replicas: 3
  selector:
    matchLabels:
      app: cassandra
  template:
    metadata:
      labels:
        app: cassandra
    spec:
      containers:
      - name: cassandra
        image: gcr.io/google-samples/cassandra:v13
        imagePullPolicy: Always
        ports:
        - containerPort: 7000
          name: intra-node
        - containerPort: 7001
          name: tls-intra-node
        - containerPort: 7199
          name: jmx
        - containerPort: 9042
          name: cql
        resources:
          requests:
            cpu: "300m"
            memory: 1Gi
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - nodetool drain
        env:
        - name: MAX_HEAP_SIZE
          value: 512M
        - name: HEAP_NEWSIZE
          value: 100M
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CASSANDRA_SEEDS
          value: "cassandra-0.cassandra.$(POD_NAMESPACE).svc.cluster.local"
        - name: CASSANDRA_CLUSTER_NAME
          value: "K8Demo"
        - name: CASSANDRA_DC
          value: "DC1-K8Demo"
        - name: CASSANDRA_RACK
          value: "Rack1-K8Demo"
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        readinessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - /ready-probe.sh
          initialDelaySeconds: 15
          timeoutSeconds: 5
        # These volume mounts are persistent. They are like inline claims,
        # but not exactly because the names need to match exactly one of
        # the stateful pod volumes.
        volumeMounts:
        - name: cassandra-data
          mountPath: /cassandra_data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  # do not use these in production until ssd GCEPersistentDisk or other ssd pd
  volumeClaimTemplates:
  - metadata:
      name: cassandra-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi

@@ -1,51 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cassandra-test-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test-server
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
      - name: test-server
        image: k8s.gcr.io/cassandra-e2e-test:0.1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 2
          periodSeconds: 2
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: tester-pdb
  labels:
    pdb: test-server
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: test-server
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test-server
  name: test-server
spec:
  ports:
  - port: 8080
  selector:
    app: test-server
  type: LoadBalancer

@@ -1,33 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  # This service only exists to create DNS entries for each pod in the stateful
  # set such that they can resolve each other's IP addresses. It does not
  # create a load-balanced ClusterIP and should not be used directly by clients
  # in most circumstances.
  name: cockroachdb
  labels:
    app: cockroachdb
  annotations:
    # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
    prometheus.io/scrape: "true"
    prometheus.io/path: "_status/vars"
    prometheus.io/port: "8080"
spec:
  ports:
  - port: 26257
    targetPort: 26257
    name: grpc
  - port: 8080
    targetPort: 8080
    name: http
  clusterIP: None
  selector:
    app: cockroachdb
  # This is needed to make the peer-finder work properly and to help avoid
  # edge cases where instance 0 comes up after losing its data and needs to
  # decide whether it should create a new cluster or try to join an existing
  # one. If it creates a new cluster when it should have joined an existing
  # one, we'd end up with two separate clusters listening at the same service
  # endpoint, which would be very bad.
  publishNotReadyAddresses: true

@@ -1,103 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cockroachdb
spec:
  serviceName: "cockroachdb"
  replicas: 3
  selector:
    matchLabels:
      app: cockroachdb
  template:
    metadata:
      labels:
        app: cockroachdb
    spec:
      # Init containers are run only once in the lifetime of a pod, before
      # it's started up for the first time. It has to exit successfully
      # before the pod's main containers are allowed to start.
      # This particular init container does a DNS lookup for other pods in
      # the set to help determine whether or not a cluster already exists.
      # If any other pods exist, it creates a file in the cockroach-data
      # directory to pass that information along to the primary container that
      # has to decide what command-line flags to use when starting CockroachDB.
      # This only matters when a pod's persistent volume is empty - if it has
      # data from a previous execution, that data will always be used.
      initContainers:
      - name: bootstrap
        image: cockroachdb/cockroach-k8s-init:0.1
        imagePullPolicy: IfNotPresent
        args:
        - "-on-start=/on-start.sh"
        - "-service=cockroachdb"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: datadir
          mountPath: "/cockroach/cockroach-data"
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - cockroachdb
              topologyKey: kubernetes.io/hostname
      containers:
      - name: cockroachdb
        image: cockroachdb/cockroach:v1.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 26257
          name: grpc
        - containerPort: 8080
          name: http
        volumeMounts:
        - name: datadir
          mountPath: /cockroach/cockroach-data
        command:
        - "/bin/bash"
        - "-ecx"
        - |
          # The use of qualified `hostname -f` is crucial:
          # Other nodes aren't able to look up the unqualified hostname.
          CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
          # We only want to initialize a new cluster (by omitting the join flag)
          # if we're sure that we're the first node (i.e. index 0) and that
          # there aren't any other nodes running as part of the cluster that
          # this is supposed to be a part of (which indicates that a cluster
          # already exists and we should make sure not to create a new one).
          # It's fine to run without --join on a restart if there aren't any
          # other nodes.
          if [ ! "$(hostname)" == "cockroachdb-0" ] || \
             [ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
          then
            # We don't join cockroachdb in order to avoid a node attempting
            # to join itself, which currently doesn't work
            # (https://github.com/cockroachdb/cockroach/issues/9625).
            CRARGS+=("--join" "cockroachdb-0.cockroachdb,cockroachdb-1.cockroachdb,cockroachdb-2.cockroachdb")
          fi
          exec /cockroach/cockroach ${CRARGS[*]}
      # No pre-stop hook is required, a SIGTERM plus some time is all that's
      # needed for graceful shutdown of a node.
      terminationGracePeriodSeconds: 60
      volumes:
      - name: datadir
        persistentVolumeClaim:
          claimName: datadir
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          storage: 1Gi

@@ -1,11 +0,0 @@
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: etcd-pdb
  labels:
    pdb: etcd
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: etcd

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: etcd
  labels:
    app: etcd
spec:
  ports:
  - port: 2380
    name: etcd-server
  - port: 2379
    name: etcd-client
  clusterIP: None
  selector:
    app: etcd
  publishNotReadyAddresses: true

@@ -1,178 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: etcd
  labels:
    app: etcd
spec:
  serviceName: etcd
  replicas: 3
  selector:
    matchLabels:
      app: etcd
  template:
    metadata:
      name: etcd
      labels:
        app: etcd
    spec:
      containers:
      - name: etcd
        image: k8s.gcr.io/etcd-amd64:2.2.5
        imagePullPolicy: Always
        ports:
        - containerPort: 2380
          name: peer
        - containerPort: 2379
          name: client
        resources:
          requests:
            cpu: 100m
            memory: 512Mi
        env:
        - name: INITIAL_CLUSTER_SIZE
          value: "3"
        - name: SET_NAME
          value: etcd
        volumeMounts:
        - name: datadir
          mountPath: /var/run/etcd
        lifecycle:
          preStop:
            exec:
              command:
              - "/bin/sh"
              - "-ec"
              - |
                EPS=""
                for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
                  EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
                done
                HOSTNAME=$(hostname)
                member_hash() {
                  etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
                }
                echo "Removing ${HOSTNAME} from etcd cluster"
                ETCDCTL_ENDPOINT=${EPS} etcdctl member remove $(member_hash)
                if [ $? -eq 0 ]; then
                  # Remove everything otherwise the cluster will no longer scale-up
                  rm -rf /var/run/etcd/*
                fi
        command:
        - "/bin/sh"
        - "-ec"
        - |
          HOSTNAME=$(hostname)
          # store member id into PVC for later member replacement
          collect_member() {
            while ! etcdctl member list &>/dev/null; do sleep 1; done
            etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1 > /var/run/etcd/member_id
            exit 0
          }
          eps() {
            EPS=""
            for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
              EPS="${EPS}${EPS:+,}http://${SET_NAME}-${i}.${SET_NAME}:2379"
            done
            echo ${EPS}
          }
          member_hash() {
            etcdctl member list | grep http://${HOSTNAME}.${SET_NAME}:2380 | cut -d':' -f1 | cut -d'[' -f1
          }
          # re-joining after failure?
          if [ -e /var/run/etcd/default.etcd ]; then
            echo "Re-joining etcd member"
            member_id=$(cat /var/run/etcd/member_id)
            # re-join member
            ETCDCTL_ENDPOINT=$(eps) etcdctl member update ${member_id} http://${HOSTNAME}.${SET_NAME}:2380
            exec etcd --name ${HOSTNAME} \
              --listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
              --listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
              --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
              --data-dir /var/run/etcd/default.etcd
          fi
          # etcd-SET_ID
          SET_ID=${HOSTNAME:5:${#HOSTNAME}}
          # adding a new member to existing cluster (assuming all initial pods are available)
          if [ "${SET_ID}" -ge ${INITIAL_CLUSTER_SIZE} ]; then
            export ETCDCTL_ENDPOINT=$(eps)
            # member already added?
            MEMBER_HASH=$(member_hash)
            if [ -n "${MEMBER_HASH}" ]; then
              # the member hash exists but for some reason etcd failed
              # as the datadir has not be created, we can remove the member
              # and retrieve new hash
              etcdctl member remove ${MEMBER_HASH}
            fi
            echo "Adding new member"
            etcdctl member add ${HOSTNAME} http://${HOSTNAME}.${SET_NAME}:2380 | grep "^ETCD_" > /var/run/etcd/new_member_envs
            if [ $? -ne 0 ]; then
              echo "Exiting"
              rm -f /var/run/etcd/new_member_envs
              exit 1
            fi
            cat /var/run/etcd/new_member_envs
            source /var/run/etcd/new_member_envs
            collect_member &
            exec etcd --name ${HOSTNAME} \
              --listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
              --listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
              --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
              --data-dir /var/run/etcd/default.etcd \
              --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
              --initial-cluster ${ETCD_INITIAL_CLUSTER} \
              --initial-cluster-state ${ETCD_INITIAL_CLUSTER_STATE}
          fi
          for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
            while true; do
              echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
              ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
              sleep 1s
            done
          done
          PEERS=""
          for i in $(seq 0 $((${INITIAL_CLUSTER_SIZE} - 1))); do
            PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
          done
          collect_member &
          # join member
          exec etcd --name ${HOSTNAME} \
            --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
            --listen-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
            --listen-client-urls http://${HOSTNAME}.${SET_NAME}:2379,http://127.0.0.1:2379 \
            --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
            --initial-cluster-token etcd-cluster-1 \
            --initial-cluster ${PEERS} \
            --initial-cluster-state new \
            --data-dir /var/run/etcd/default.etcd
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes:
      - "ReadWriteOnce"
      resources:
        requests:
          # upstream recommended max is 700M
          storage: 1Gi

@@ -1,27 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: etcd-test-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test-server
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
      - name: test-server
        image: k8s.gcr.io/etcd-statefulset-e2e-test:0.0
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 2
          periodSeconds: 2

@@ -1,16 +0,0 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  name: galera
  labels:
    app: mysql
spec:
  ports:
  - port: 3306
    name: mysql
  # *.galear.default.svc.cluster.local
  clusterIP: None
  selector:
    app: mysql
  publishNotReadyAddresses: true

@@ -1,87 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  serviceName: "galera"
  replicas: 3
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: install
        image: k8s.gcr.io/galera-install:0.1
        imagePullPolicy: Always
        args:
        - "--work-dir=/work-dir"
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
        - name: config
          mountPath: "/etc/mysql"
      - name: bootstrap
        image: debian:jessie
        command:
        - "/work-dir/peer-finder"
        args:
        - -on-start="/work-dir/on-start.sh"
        - "-service=galera"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: workdir
          mountPath: "/work-dir"
        - name: config
          mountPath: "/etc/mysql"
      containers:
      - name: mysql
        image: k8s.gcr.io/mysql-galera:e2e
        ports:
        - containerPort: 3306
          name: mysql
        - containerPort: 4444
          name: sst
        - containerPort: 4567
          name: replication
        - containerPort: 4568
          name: ist
        args:
        - --defaults-file=/etc/mysql/my-galera.cnf
        - --user=root
        readinessProbe:
          # TODO: If docker exec is buggy just use k8s.gcr.io/mysql-healthz:1.0
          exec:
            command:
            - sh
            - -c
            - "mysql -u root -e 'show databases;'"
          initialDelaySeconds: 15
          timeoutSeconds: 5
          successThreshold: 2
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/
        - name: config
          mountPath: /etc/mysql
      volumes:
      - name: config
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi

@@ -1,13 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    [mysqld]
    log-bin
  slave.cnf: |
    [mysqld]
    super-read-only

@@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
  type: LoadBalancer

@@ -1,162 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  serviceName: mysql
  replicas: 3
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7.15
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 1
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          timeoutSeconds: 5
        readinessProbe:
          exec:
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          timeoutSeconds: 1
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql
          if [[ -f xtrabackup_slave_info ]]; then
            mv xtrabackup_slave_info change_master_to.sql.in
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
            echo "Initializing replication from clone position"
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: default
      resources:
        requests:
          storage: 10Gi
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: mysql-pdb
  labels:
    pdb: mysql
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: mysql

@@ -1,51 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql-test-server
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test-server
  template:
    metadata:
      labels:
        app: test-server
    spec:
      containers:
      - name: test-server
        image: k8s.gcr.io/mysql-e2e-test:0.1
        imagePullPolicy: Always
        ports:
        - containerPort: 8080
        readinessProbe:
          httpGet:
            path: /healthz
            port: 8080
          initialDelaySeconds: 2
          periodSeconds: 2
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: tester-pdb
  labels:
    pdb: test-server
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: test-server
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: test-server
  name: test-server
spec:
  ports:
  - port: 8080
  selector:
    app: test-server
  type: LoadBalancer

@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx

@@ -1,34 +0,0 @@
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: k8s.gcr.io/nginx-slim:0.8
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
      annotations:
        volume.beta.kubernetes.io/storage-class: nginx-sc
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi

@@ -1,15 +0,0 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  name: redis
  labels:
    app: redis
spec:
  ports:
  - port: 6379
    name: peer
  # *.redis.default.svc.cluster.local
  clusterIP: None
  selector:
    app: redis

@@ -1,81 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: rd
spec:
  serviceName: "redis"
  replicas: 3
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      initContainers:
      - name: install
        image: k8s.gcr.io/redis-install-3.2.0:e2e
        imagePullPolicy: Always
        args:
        - "--install-into=/opt"
        - "--work-dir=/work-dir"
        volumeMounts:
        - name: opt
          mountPath: "/opt"
        - name: workdir
          mountPath: "/work-dir"
      - name: bootstrap
        image: debian:jessie
        command:
        - "/work-dir/peer-finder"
        args:
        - -on-start="/work-dir/on-start.sh"
        - "-service=redis"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: opt
          mountPath: "/opt"
        - name: workdir
          mountPath: "/work-dir"
      containers:
      - name: redis
        image: debian:jessie
        ports:
        - containerPort: 6379
          name: peer
        command:
        - /opt/redis/redis-server
        args:
        - /opt/redis/redis.conf
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "/opt/redis/redis-cli -h $(hostname) ping"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /data
        - name: opt
          mountPath: /opt
      volumes:
      - name: opt
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi

@@ -1,18 +0,0 @@
# A headless service to create DNS records
apiVersion: v1
kind: Service
metadata:
  name: zk
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: peer
  - port: 3888
    name: leader-election
  # *.zk.default.svc.cluster.local
  clusterIP: None
  selector:
    app: zk
  publishNotReadyAddresses: true

@@ -1,88 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zoo
spec:
  serviceName: "zk"
  replicas: 3
  selector:
    matchLabels:
      app: zk
  template:
    metadata:
      labels:
        app: zk
    spec:
      initContainers:
      - name: install
        image: k8s.gcr.io/zookeeper-install-3.5.0-alpha:e2e
        imagePullPolicy: Always
        args:
        - "--install-into=/opt"
        - "--work-dir=/work-dir"
        volumeMounts:
        - name: opt
          mountPath: "/opt/"
        - name: workdir
          mountPath: "/work-dir"
      - name: bootstrap
        image: java:openjdk-8-jre
        command:
        - "/work-dir/peer-finder"
        args:
        - -on-start="/work-dir/on-start.sh"
        - "-service=zk"
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        volumeMounts:
        - name: opt
          mountPath: "/opt"
        - name: workdir
          mountPath: "/work-dir"
        - name: datadir
          mountPath: "/tmp/zookeeper"
      containers:
      - name: zk
        image: java:openjdk-8-jre
        ports:
        - containerPort: 2888
          name: peer
        - containerPort: 3888
          name: leader-election
        command:
        - /opt/zookeeper/bin/zkServer.sh
        args:
        - start-foreground
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "/opt/zookeeper/bin/zkCli.sh ls /"
          initialDelaySeconds: 15
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /tmp/zookeeper
        - name: opt
          mountPath: /opt
        # Mount the work-dir just for debugging
        - name: workdir
          mountPath: /work-dir
      volumes:
      - name: opt
        emptyDir: {}
      - name: workdir
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi