Mirror of https://github.com/ceph/ceph-csi.git, synced 2025-06-13 10:33:35 +00:00

vendor updates
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns.yaml.base (generated, vendored): 31 changed lines

@@ -57,12 +57,13 @@ data:
   Corefile: |
     .:53 {
         errors
         log
         health
         kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
         }
-        prometheus
+        prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
     }
@@ -78,7 +79,11 @@ metadata:
     addonmanager.kubernetes.io/mode: Reconcile
     kubernetes.io/name: "CoreDNS"
 spec:
-  replicas: 1
+  replicas: 2
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
   selector:
     matchLabels:
       k8s-app: coredns
@@ -93,9 +98,21 @@ spec:
         effect: NoSchedule
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: k8s-app
+                  operator: In
+                  values:
+                  - coredns
+              topologyKey: kubernetes.io/hostname
       containers:
       - name: coredns
-        image: coredns/coredns:1.0.1
+        image: coredns/coredns:1.0.4
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -114,9 +131,6 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
-        - containerPort: 9153
-          name: metrics
-          protocol: TCP
         livenessProbe:
           httpGet:
             path: /health
@@ -156,6 +170,3 @@ spec:
   - name: dns-tcp
     port: 53
     protocol: TCP
-  - name: metrics
-    port: 9153
-    protocol: TCP
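A condensed view of what the Deployment-level hunks in this file add up to may help: two replicas rolled out one at a time, spread across nodes by preferred pod anti-affinity, running the 1.0.4 image. This is a readability sketch assembled from the hunks above (unchanged fields and the rest of the manifest are omitted), not an excerpt of the file:

    spec:
      replicas: 2
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      template:
        spec:
          affinity:
            podAntiAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100
                podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                    - key: k8s-app
                      operator: In
                      values:
                      - coredns
                  topologyKey: kubernetes.io/hostname
          containers:
          - name: coredns
            image: coredns/coredns:1.0.4

The later hunks drop the separate metrics containerPort and Service port, which lines up with the Corefile change that binds the prometheus plugin explicitly to :9153.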
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns.yaml.in (generated, vendored): 31 changed lines

@@ -57,12 +57,13 @@ data:
   Corefile: |
     .:53 {
         errors
         log
         health
         kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
         }
-        prometheus
+        prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
     }
@@ -78,7 +79,11 @@ metadata:
     addonmanager.kubernetes.io/mode: Reconcile
     kubernetes.io/name: "CoreDNS"
 spec:
-  replicas: 1
+  replicas: 2
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
   selector:
     matchLabels:
       k8s-app: coredns
@@ -93,9 +98,21 @@ spec:
         effect: NoSchedule
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: k8s-app
+                  operator: In
+                  values:
+                  - coredns
+              topologyKey: kubernetes.io/hostname
       containers:
       - name: coredns
-        image: coredns/coredns:1.0.1
+        image: coredns/coredns:1.0.4
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -114,9 +131,6 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
-        - containerPort: 9153
-          name: metrics
-          protocol: TCP
         livenessProbe:
           httpGet:
             path: /health
@@ -156,6 +170,3 @@ spec:
   - name: dns-tcp
     port: 53
     protocol: TCP
-  - name: metrics
-    port: 9153
-    protocol: TCP
vendor/k8s.io/kubernetes/cluster/addons/dns/coredns.yaml.sed (generated, vendored): 31 changed lines

@@ -57,12 +57,13 @@ data:
   Corefile: |
     .:53 {
         errors
         log
         health
         kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
         }
-        prometheus
+        prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
     }
@@ -78,7 +79,11 @@ metadata:
     addonmanager.kubernetes.io/mode: Reconcile
     kubernetes.io/name: "CoreDNS"
 spec:
-  replicas: 1
+  replicas: 2
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
   selector:
     matchLabels:
       k8s-app: coredns
@@ -93,9 +98,21 @@ spec:
         effect: NoSchedule
       - key: "CriticalAddonsOnly"
         operator: "Exists"
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - weight: 100
+            podAffinityTerm:
+              labelSelector:
+                matchExpressions:
+                - key: k8s-app
+                  operator: In
+                  values:
+                  - coredns
+              topologyKey: kubernetes.io/hostname
       containers:
       - name: coredns
-        image: coredns/coredns:1.0.1
+        image: coredns/coredns:1.0.4
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -114,9 +131,6 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
-        - containerPort: 9153
-          name: metrics
-          protocol: TCP
         livenessProbe:
           httpGet:
             path: /health
@@ -156,6 +170,3 @@ spec:
   - name: dns-tcp
     port: 53
     protocol: TCP
-  - name: metrics
-    port: 9153
-    protocol: TCP
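The three CoreDNS templates (.base, .in, .sed) carry the identical change and differ only in how the cluster DNS domain and service CIDR are parameterized. As an illustration only, with a hypothetical domain cluster.local and service CIDR 10.96.0.0/12 (values not taken from this commit), each placeholder form resolves to the same kubernetes plugin stanza:

    # coredns.yaml.base: kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
    # coredns.yaml.in:   kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
    # coredns.yaml.sed:  kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
    # After substitution (hypothetical example values):
    kubernetes cluster.local 10.96.0.0/12 {
        pods insecure
        upstream /etc/resolv.conf
        fallthrough in-addr.arpa ip6.arpa
    }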
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns.yaml.base (generated, vendored): 7 changed lines

@@ -84,6 +84,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -94,7 +95,7 @@ spec:
           optional: true
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -145,7 +146,7 @@ spec:
         - name: kube-dns-config
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -184,7 +185,7 @@ spec:
         - name: kube-dns-config
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
         livenessProbe:
           httpGet:
             path: /metrics
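Condensed, the kube-dns change moves all three containers from gcr.io/google_containers at 1.14.7 to k8s.gcr.io at 1.14.8 and marks the pod cluster-critical via priorityClassName. A readability sketch with unchanged fields omitted, not an excerpt of the manifest:

    spec:
      template:
        spec:
          priorityClassName: system-cluster-critical
          containers:
          - name: kubedns
            image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
          - name: dnsmasq
            image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
          - name: sidecar
            image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8

The same change is repeated verbatim in kube-dns.yaml.in and kube-dns.yaml.sed below.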
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns.yaml.in (generated, vendored): 7 changed lines

@@ -84,6 +84,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -94,7 +95,7 @@ spec:
           optional: true
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -145,7 +146,7 @@ spec:
         - name: kube-dns-config
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -184,7 +185,7 @@ spec:
         - name: kube-dns-config
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
         livenessProbe:
           httpGet:
             path: /metrics
vendor/k8s.io/kubernetes/cluster/addons/dns/kube-dns.yaml.sed (generated, vendored): 7 changed lines

@@ -84,6 +84,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -94,7 +95,7 @@ spec:
           optional: true
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -145,7 +146,7 @@ spec:
         - name: kube-dns-config
           mountPath: /kube-dns-config
       - name: dnsmasq
-        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -184,7 +185,7 @@ spec:
         - name: kube-dns-config
           mountPath: /etc/k8s/dns/dnsmasq-nanny
       - name: sidecar
-        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
+        image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
         livenessProbe:
           httpGet:
             path: /metrics