# kube-dns ReplicationController, addon version v19.
# Reconstructed post-change content from a garbled GitHub diff view: fused diff
# line numbers and +/- markers removed, removed lines dropped, added/context
# lines kept. Spans elided by the diff's "@@" hunk headers were rebuilt from the
# hunk line counts and are flagged with NOTE(review) comments below — confirm
# each against the upstream Kubernetes dns addon manifest before relying on it.
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v19
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v19
    # quoted so the label value stays the string "true", not a YAML boolean
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
    version: v19
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v19
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        # JSON payload kept as a single-quoted string so YAML doesn't parse the
        # flow collection; no leading space inside the quotes.
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      containers:
      - name: kubedns
        image: gcr.io/google_containers/kubedns-amd64:1.7
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          # NOTE(review): new lines 43-49 were elided in the source diff view;
          # the seven lines below are reconstructed — verify against upstream.
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
        # NOTE(review): new lines 57-64 were elided in the source diff view
        # ("@@ -64,10 +65,20"); the eight lines below are reconstructed —
        # verify against upstream.
        - --domain=cluster.local.
        - --dns-port=10053
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
        livenessProbe:
          httpGet:
            path: /healthz-dnsmasq
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
        - --log-facility=-
        ports:
        - containerPort: 53
          name: dns
          # NOTE(review): new lines 85-86 were elided in the source diff view
          # ("@@ -76,19 +87,24"); the two lines below are reconstructed —
          # verify against upstream.
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: gcr.io/google_containers/exechealthz-amd64:1.2
        resources:
          limits:
            memory: 50Mi
          requests:
            cpu: 10m
            # Note that this container shouldn't really need 50Mi of memory. The
            # limits are set higher than expected pending investigation on #29688.
            # The extra memory was stolen from the kubedns container to keep the
            # net memory requested by the pod constant.
            memory: 50Mi
        args:
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
        - --url=/healthz-dnsmasq
        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
        - --url=/healthz-kubedns
        - --port=8080
        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
# NOTE(review): the diff view ends here (new line 110). The upstream manifest
# continues past this point (e.g. the pod's dnsPolicy) — content not visible in
# this chunk, so it is intentionally not reproduced.