This repository was archived by the owner on Nov 9, 2017. It is now read-only.

Commit 96a87c7

Merge pull request #101 from TheNewNormal/v0.5.1
v0.5.1
2 parents f01bcba + 4fdc988 commit 96a87c7

File tree

15 files changed: +75 −129 lines

.gitattributes

Lines changed: 2 additions & 0 deletions

@@ -0,0 +1,2 @@
+src/k8s/kube.tgz filter=lfs diff=lfs merge=lfs -text
+src/k8s/kubectl filter=lfs diff=lfs merge=lfs -text
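These two attributes mark the bundled Kubernetes artifacts as Git LFS objects. They are exactly the lines `git lfs track` writes; a minimal sketch of reproducing them locally, assuming Git LFS is installed:

    git lfs install
    git lfs track "src/k8s/kube.tgz"
    git lfs track "src/k8s/kubectl"
    git add .gitattributes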

src/Kube-Cluster/AppDelegate.m

Lines changed: 0 additions & 18 deletions

@@ -410,24 +410,6 @@ - (IBAction)KubernetesUI:(id)sender {
 }
 
 
-- (IBAction)Kubedash:(id)sender {
-    VMStatus vmStatus = [self.vmManager checkVMStatus];
-
-    switch (vmStatus) {
-        case VMStatusDown:
-            [self notifyUserWithText:NSLocalizedString(@"VMStateOff", nil)];
-            break;
-
-        case VMStatusUp: {
-            NSString *vmIP = [NSString stringWithContentsOfURL:[NSURL ks_masterIpAddressURL] encoding:NSUTF8StringEncoding error:nil];
-            NSString *url = [NSString stringWithFormat:@"http://%@:8080/api/v1/proxy/namespaces/kube-system/services/kubedash", vmIP];
-            [[NSWorkspace sharedWorkspace] openURL:[NSURL URLWithString:url]];
-            break;
-        }
-    }
-}
-
-
 - (IBAction)quit:(id)sender {
     VMStatus vmStatus = [self.vmManager checkVMStatus];
 

src/Kube-Cluster/Base.lproj/MainMenu.xib

Lines changed: 3 additions & 9 deletions

@@ -1,7 +1,7 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<document type="com.apple.InterfaceBuilder3.Cocoa.XIB" version="3.0" toolsVersion="10117" systemVersion="15F34" targetRuntime="MacOSX.Cocoa" propertyAccessControl="none" useAutolayout="YES">
+<?xml version="1.0" encoding="UTF-8"?>
+<document type="com.apple.InterfaceBuilder3.Cocoa.XIB" version="3.0" toolsVersion="11201" systemVersion="16B2327e" targetRuntime="MacOSX.Cocoa" propertyAccessControl="none" useAutolayout="YES">
     <dependencies>
-        <plugIn identifier="com.apple.InterfaceBuilder.CocoaPlugin" version="10117"/>
+        <plugIn identifier="com.apple.InterfaceBuilder.CocoaPlugin" version="11201"/>
     </dependencies>
     <objects>
         <customObject id="-2" userLabel="File's Owner" customClass="NSApplication">
@@ -668,12 +668,6 @@
                     <action selector="KubernetesUI:" target="494" id="5Xc-5r-fwB"/>
                 </connections>
             </menuItem>
-            <menuItem title="Kubedash" id="bXS-8Q-f6a">
-                <modifierMask key="keyEquivalentModifierMask"/>
-                <connections>
-                    <action selector="Kubedash:" target="494" id="TJP-yC-M7A"/>
-                </connections>
-            </menuItem>
             <menuItem isSeparatorItem="YES" id="JNE-kH-7PZ"/>
             <menuItem title="Check for App updates" id="mVR-pT-vO7">
                 <modifierMask key="keyEquivalentModifierMask"/>

src/Kube-Cluster/Kube-Cluster-Info.plist

Lines changed: 2 additions & 2 deletions

@@ -19,11 +19,11 @@
 	<key>CFBundlePackageType</key>
 	<string>APPL</string>
 	<key>CFBundleShortVersionString</key>
-	<string>0.5.0</string>
+	<string>0.5.1</string>
 	<key>CFBundleSignature</key>
 	<string>????</string>
 	<key>CFBundleVersion</key>
-	<string>197</string>
+	<string>201</string>
 	<key>LSApplicationCategoryType</key>
 	<string>public.app-category.utilities</string>
 	<key>LSMinimumSystemVersion</key>
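The bumped values can be read back from the plist after the change; a small check, assuming macOS's PlistBuddy is available:

    /usr/libexec/PlistBuddy -c "Print :CFBundleShortVersionString" src/Kube-Cluster/Kube-Cluster-Info.plist
    /usr/libexec/PlistBuddy -c "Print :CFBundleVersion" src/Kube-Cluster/Kube-Cluster-Info.plist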

src/bin/install_deis

Lines changed: 0 additions & 3 deletions

@@ -12,9 +12,6 @@ node1_vm_ip=$(~/bin/corectl q -i k8snode-01)
 
 #
 echo " "
-echo "If you have installed previous versions of Deis Workflow PaaS, please uninstall it first"
-echo "with '$ helmc uninstall workflow-VERSION -n deis' ..."
-echo " "
 echo "If you want to upgrade already installed version, check out how to do upgrade at https://deis.com/docs/workflow/managing-workflow/upgrading-workflow/"
 echo " "
 pause 'Press [Enter] key to continue...'

src/cloud-init/user-data.node

Lines changed: 1 addition & 0 deletions

@@ -66,6 +66,7 @@ coreos:
         ExecStart=/bin/sh -c '[[ -d /data/kubernetes/manifests ]] || mkdir -p /data/kubernetes/manifests'
         ExecStart=/bin/sh -c '[[ -d /data/kubelet ]] || mkdir -p /data/kubelet'
         ExecStart=/bin/sh -c 'sudo ln -s /data/kubelet /var/lib/kubelet'
+        ExecStart=/bin/sh -c 'sudo mkdir -p /opt/bin/ && sudo ln -s /data/opt/bin/socat /opt/bin/socat'
     - name: var-lib-docker.mount
       command: start
       content: |
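The added ExecStart line symlinks socat into /opt/bin, where the kubelet on CoreOS looks for it when serving port-forward requests. A quick way to confirm the effect on a node, as a sketch (the pod name is a placeholder; without the binary, port-forward typically fails with a "socat not found" error):

    ls -l /opt/bin/socat
    ~/kube-cluster/bin/kubectl port-forward <some-pod> 8080:8080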

src/functions.sh

Lines changed: 0 additions & 4 deletions

@@ -591,17 +591,13 @@ echo "Installing Kubernetes UI ..."
 ~/kube-cluster/bin/kubectl create -f ~/kube-cluster/kubernetes/dashboard-controller.yaml
 ~/kube-cluster/bin/kubectl create -f ~/kube-cluster/kubernetes/dashboard-service.yaml
 #
-echo " "
-echo "Installing Kubedash ..."
-~/kube-cluster/bin/kubectl create -f ~/kube-cluster/kubernetes/kubedash.yaml
 sleep 1
 # clean up kubernetes folder
 rm -f ~/kube-cluster/kubernetes/kube-system-ns.yaml
 rm -f ~/kube-cluster/kubernetes/skydns-rc.yaml
 rm -f ~/kube-cluster/kubernetes/skydns-svc.yaml
 rm -f ~/kube-cluster/kubernetes/dashboard-controller.yaml
 rm -f ~/kube-cluster/kubernetes/dashboard-service.yaml
-rm -f ~/kube-cluster/kubernetes/kubedash.yaml
 echo " "
 }
 

src/k8s/add-ons/dashboard-controller.yaml

Lines changed: 7 additions & 4 deletions

@@ -2,11 +2,11 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: kubernetes-dashboard-v1.1.0
+  name: kubernetes-dashboard-v1.4.0
   namespace: kube-system
   labels:
     k8s-app: kubernetes-dashboard
-    version: v1.1.0
+    version: v1.4.0
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: 1
@@ -16,12 +16,15 @@ spec:
     metadata:
       labels:
         k8s-app: kubernetes-dashboard
-        version: v1.1.0
+        version: v1.4.0
         kubernetes.io/cluster-service: "true"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
     spec:
       containers:
       - name: kubernetes-dashboard
-        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.1.0
+        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.0
         resources:
           # keep request = limit to keep this container in guaranteed class
          limits:
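Once the add-on is recreated, the dashboard upgrade can be checked from the host; a sketch, assuming kubectl is pointed at this cluster and the companion dashboard-service.yaml Service is installed:

    ~/kube-cluster/bin/kubectl --namespace=kube-system get pods -l k8s-app=kubernetes-dashboard
    ~/kube-cluster/bin/kubectl proxy &
    open http://localhost:8001/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard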

src/k8s/add-ons/kubedash.yaml

Lines changed: 0 additions & 70 deletions
This file was deleted.

src/k8s/add-ons/skydns-rc.yaml

Lines changed: 35 additions & 19 deletions

@@ -1,41 +1,42 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: kube-dns-v17
+  name: kube-dns-v19
   namespace: kube-system
   labels:
     k8s-app: kube-dns
-    version: v17
+    version: v19
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: 1
   selector:
     k8s-app: kube-dns
-    version: v17
+    version: v19
   template:
     metadata:
       labels:
         k8s-app: kube-dns
-        version: v17
-        kubernetes.io/cluster-service: "true"
+        version: v19
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
     spec:
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/kubedns-amd64:1.5
+        image: gcr.io/google_containers/kubedns-amd64:1.7
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
-            cpu: 100m
-            memory: 200Mi
+            memory: 170Mi
          requests:
            cpu: 100m
-            memory: 100Mi
+            memory: 70Mi
        livenessProbe:
          httpGet:
-            path: /healthz
+            path: /healthz-kubedns
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
@@ -49,7 +50,7 @@ spec:
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
-          initialDelaySeconds: 30
+          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        # command = "/kube-dns"
@@ -64,10 +65,20 @@ spec:
          protocol: TCP
      - name: dnsmasq
        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
+        livenessProbe:
+          httpGet:
+            path: /healthz-dnsmasq
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
        args:
        - --cache-size=1000
        - --no-resolv
        - --server=127.0.0.1#10053
+        - --log-facility=-
        ports:
        - containerPort: 53
          name: dns
@@ -76,19 +87,24 @@ spec:
          name: dns-tcp
          protocol: TCP
      - name: healthz
-        image: gcr.io/google_containers/exechealthz-amd64:1.0
+        image: gcr.io/google_containers/exechealthz-amd64:1.2
        resources:
-          # keep request = limit to keep this container in guaranteed class
          limits:
-            cpu: 10m
-            memory: 20Mi
+            memory: 50Mi
          requests:
            cpu: 10m
-            memory: 20Mi
+            # Note that this container shouldn't really need 50Mi of memory. The
+            # limits are set higher than expected pending investigation on #29688.
+            # The extra memory was stolen from the kubedns container to keep the
+            # net memory requested by the pod constant.
+            memory: 50Mi
        args:
-        - -cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
-        - -port=8080
-        - -quiet
+        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
+        - --url=/healthz-dnsmasq
+        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
+        - --url=/healthz-kubedns
+        - --port=8080
+        - --quiet
        ports:
        - containerPort: 8080
          protocol: TCP
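After the kube-dns upgrade rolls out, cluster DNS can be spot-checked from the host; a sketch, assuming kubectl is configured for this cluster (the pod name dns-test is arbitrary):

    ~/kube-cluster/bin/kubectl --namespace=kube-system get pods -l k8s-app=kube-dns
    ~/kube-cluster/bin/kubectl run -i -t dns-test --image=busybox --restart=Never -- nslookup kubernetes.default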
