mirror of https://github.com/ceph/ceph-csi.git
vendor update for CSI 0.3.0
@@ -1,10 +1,13 @@
-apiVersion: "apps/v1"
+apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: cassandra
 spec:
   serviceName: cassandra
   replicas: 3
+  selector:
+    matchLabels:
+      app: cassandra
   template:
     metadata:
       labels:
@@ -1,9 +1,12 @@
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: cassandra-test-server
 spec:
   replicas: 3
+  selector:
+    matchLabels:
+      app: test-server
   template:
     metadata:
       labels:
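Note: the two hunks above update the Cassandra StatefulSet's apiVersion and move its test-server Deployment off the deprecated apps/v1beta1 group to apps/v1. Under apps/v1, spec.selector is required, immutable, and must match the pod template's labels, which is why every workload touched by this vendor update gains a selector/matchLabels block. A minimal sketch of the resulting shape (the name, image, and label values below are illustrative assumptions, not taken from the vendored manifests):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-server          # hypothetical name, for illustration only
spec:
  replicas: 3
  selector:
    matchLabels:
      app: example-server       # apps/v1 requires an explicit selector...
  template:
    metadata:
      labels:
        app: example-server     # ...that matches the template labels exactly
    spec:
      containers:
      - name: server
        image: nginx:1.14       # placeholder image
        ports:
        - containerPort: 80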
14 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/cockroachdb/service.yaml generated vendored
@@ -9,13 +9,6 @@ metadata:
   labels:
     app: cockroachdb
   annotations:
-    # This is needed to make the peer-finder work properly and to help avoid
-    # edge cases where instance 0 comes up after losing its data and needs to
-    # decide whether it should create a new cluster or try to join an existing
-    # one. If it creates a new cluster when it should have joined an existing
-    # one, we'd end up with two separate clusters listening at the same service
-    # endpoint, which would be very bad.
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
     # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
     prometheus.io/scrape: "true"
     prometheus.io/path: "_status/vars"
@@ -31,3 +24,10 @@ spec:
   clusterIP: None
   selector:
     app: cockroachdb
+  # This is needed to make the peer-finder work properly and to help avoid
+  # edge cases where instance 0 comes up after losing its data and needs to
+  # decide whether it should create a new cluster or try to join an existing
+  # one. If it creates a new cluster when it should have joined an existing
+  # one, we'd end up with two separate clusters listening at the same service
+  # endpoint, which would be very bad.
+  publishNotReadyAddresses: true
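Note: this pair of hunks shows the second recurring change in the update: the alpha annotation service.alpha.kubernetes.io/tolerate-unready-endpoints is replaced by the first-class Service field spec.publishNotReadyAddresses, which asks the DNS layer to publish records for pods that have not yet passed readiness checks, so peers can discover each other while a CockroachDB cluster is still bootstrapping. A minimal sketch of the resulting headless Service (the name and port below are illustrative assumptions):

apiVersion: v1
kind: Service
metadata:
  name: example-peers             # hypothetical name, for illustration only
spec:
  clusterIP: None                 # headless: one DNS record per backing pod
  publishNotReadyAddresses: true  # replaces the tolerate-unready-endpoints annotation
  selector:
    app: example
  ports:
  - name: peer
    port: 7000                    # placeholder port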
@@ -5,6 +5,9 @@ metadata:
 spec:
   serviceName: "cockroachdb"
   replicas: 3
+  selector:
+    matchLabels:
+      app: cockroachdb
   template:
     metadata:
       labels:
4 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/service.yaml generated vendored
@@ -1,8 +1,5 @@
 apiVersion: v1
 kind: Service
-metadata:
-  annotations:
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
 metadata:
   name: etcd
   labels:
@@ -16,3 +13,4 @@ spec:
   clusterIP: None
   selector:
     app: etcd
+  publishNotReadyAddresses: true
@@ -7,6 +7,9 @@ metadata:
 spec:
   serviceName: etcd
   replicas: 3
+  selector:
+    matchLabels:
+      app: etcd
   template:
     metadata:
       name: etcd
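Note: a StatefulSet's serviceName must reference its governing headless Service (here, the etcd Service patched in the hunks above). Together they give each replica a stable DNS identity of the form <pod>.<service>.<namespace>.svc, e.g. etcd-0.etcd.default.svc for the first replica, and publishNotReadyAddresses: true makes those records resolvable before the pods report ready. A compact sketch of the pairing, with assumed example names:

apiVersion: v1
kind: Service
metadata:
  name: example                  # governing headless Service
spec:
  clusterIP: None
  publishNotReadyAddresses: true
  selector:
    app: example
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: example
spec:
  serviceName: example           # must name the headless Service above
  replicas: 3
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: app
        image: busybox:1.29      # placeholder image
        command: ["sleep", "3600"]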
5 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/etcd/tester.yaml generated vendored
@@ -1,9 +1,12 @@
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: etcd-test-server
 spec:
   replicas: 3
+  selector:
+    matchLabels:
+      app: test-server
   template:
     metadata:
       labels:
@@ -2,8 +2,6 @@
 apiVersion: v1
 kind: Service
 metadata:
-  annotations:
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
   name: galera
   labels:
     app: mysql
@@ -15,4 +13,4 @@ spec:
   clusterIP: None
   selector:
     app: mysql
-
+  publishNotReadyAddresses: true
@@ -5,6 +5,9 @@ metadata:
 spec:
   serviceName: "galera"
   replicas: 3
+  selector:
+    matchLabels:
+      app: mysql
   template:
     metadata:
       labels:
@@ -5,6 +5,9 @@ metadata:
 spec:
   serviceName: mysql
   replicas: 3
+  selector:
+    matchLabels:
+      app: mysql
   template:
     metadata:
       labels:
@@ -1,9 +1,12 @@
-apiVersion: apps/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: mysql-test-server
 spec:
   replicas: 3
+  selector:
+    matchLabels:
+      app: test-server
   template:
     metadata:
       labels:
3 vendor/k8s.io/kubernetes/test/e2e/testing-manifests/statefulset/redis/service.yaml generated vendored
@@ -2,8 +2,6 @@
 apiVersion: v1
 kind: Service
 metadata:
-  annotations:
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
   name: redis
   labels:
     app: redis
@@ -15,4 +13,3 @@ spec:
   clusterIP: None
   selector:
     app: redis
-
@@ -5,6 +5,9 @@ metadata:
 spec:
   serviceName: "redis"
   replicas: 3
+  selector:
+    matchLabels:
+      app: redis
   template:
     metadata:
       labels:
@@ -2,8 +2,6 @@
 apiVersion: v1
 kind: Service
 metadata:
-  annotations:
-    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
   name: zk
   labels:
     app: zk
@@ -17,4 +15,4 @@ spec:
   clusterIP: None
   selector:
     app: zk
-
+  publishNotReadyAddresses: true
@@ -5,6 +5,9 @@ metadata:
 spec:
   serviceName: "zk"
   replicas: 3
+  selector:
+    matchLabels:
+      app: zk
   template:
     metadata:
       labels:
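Note: the same two mechanical changes (apps/v1beta1 to apps/v1 with an explicit selector, and the tolerate-unready-endpoints annotation removed, usually in favor of publishNotReadyAddresses) repeat across the galera, mysql, redis, and zookeeper manifests above. One way to sanity-check manifests like these against a live cluster is kubectl api-versions, which lists the API groups the server actually serves, and kubectl apply --dry-run -f <file>, which validates a manifest without persisting it.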