vendor updates

Serguei Bezverkhi
2018-03-06 17:33:18 -05:00
parent 4b3ebc171b
commit e9033989a0
5854 changed files with 248382 additions and 119809 deletions

View File

@@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
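
For orientation, the Corefile that the first hunk above converges on can be sketched as follows. This rendering drops the diff's +/- markers, so the old/new split is partly an assumption: the prometheus line clearly gains an explicit :9153 listen address, and the log line is assumed here to belong to the old side of the hunk.

  Corefile: |
    .:53 {
        errors
        health
        kubernetes __PILLAR__DNS__DOMAIN__ __PILLAR__CLUSTER_CIDR__ {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        proxy . /etc/resolv.conf
        cache 30
    }

With prometheus bound to its own :9153 listener, the later hunks in this file drop the matching metrics containerPort and the 9153 port on the Service, presumably so that metrics are scraped from the pods directly rather than through the Service.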

View File

@@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes {{ pillar['dns_domain'] }} {{ pillar['service_cluster_ip_range'] }} {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP

View File

@@ -57,12 +57,13 @@ data:
Corefile: |
.:53 {
errors
log
health
kubernetes $DNS_DOMAIN $SERVICE_CLUSTER_IP_RANGE {
pods insecure
upstream /etc/resolv.conf
fallthrough in-addr.arpa ip6.arpa
}
prometheus
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
@@ -78,7 +79,11 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
@@ -93,9 +98,21 @@ spec:
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- coredns
topologyKey: kubernetes.io/hostname
containers:
- name: coredns
image: coredns/coredns:1.0.1
image: coredns/coredns:1.0.4
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -114,9 +131,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
@@ -156,6 +170,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
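
The Deployment-level change is the same across all three templating variants shown above (sed-style __PILLAR__ tokens, salt pillar expressions, and shell-style $VAR placeholders): two replicas instead of one, an explicit RollingUpdate strategy with maxUnavailable: 1, a preferred pod anti-affinity rule keyed on kubernetes.io/hostname, and an image bump from coredns/coredns:1.0.1 to 1.0.4. Assembled into one fragment, with the nesting under template.spec inferred from the standard Deployment schema and unchanged fields elided, that amounts to roughly:

spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: k8s-app
                  operator: In
                  values:
                  - coredns
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.0.4

Because the anti-affinity is only preferred (weight 100) rather than required, both replicas can still be scheduled onto a single node; the scheduler merely favors spreading them across hostnames.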

View File

@@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics

View File

@@ -84,6 +84,7 @@ spec:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
@@ -94,7 +95,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -145,7 +146,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -184,7 +185,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8
livenessProbe:
httpGet:
path: /metrics
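
The kube-dns manifests get a smaller change, again repeated once per templating variant: the pod spec gains priorityClassName: system-cluster-critical, and the three container images move from the gcr.io/google_containers path to the k8s.gcr.io registry while going from 1.14.7 to 1.14.8. Condensed into the affected pod-spec fields only (everything else in the containers is unchanged and omitted here):

spec:
  priorityClassName: system-cluster-critical
  containers:
  - name: kubedns
    image: k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.8
  - name: dnsmasq
    image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.8
  - name: sidecar
    image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.8

The priorityClassName field complements the existing scheduler.alpha.kubernetes.io/critical-pod annotation: system-cluster-critical is one of the built-in priority classes, so the scheduler can preempt lower-priority pods to keep DNS running.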