Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

cluster/gce/coreos: Make kube-up work for both rkt and docker on coreos on gce #27220

Merged
merged 3 commits into from
Jun 18, 2016
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
11 changes: 8 additions & 3 deletions cluster/gce/coreos/configure-node.sh
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,7 @@ function wait-url-up() {
done
}

# Configure addon yamls, and run salt/kube-addons/kube-addon.sh
# Configure addon yamls, and run salt/kube-addons/kube-addons.sh
function configure-master-addons() {
echo "Configuring master addons"

Expand Down Expand Up @@ -160,6 +160,10 @@ function configure-master-addons() {
CLUSTER_REGISTRY_DISK_SIZE=$(convert-bytes-gce-kube "${CLUSTER_REGISTRY_DISK_SIZE}")
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/registry ${addon_dir}/registry
fi

if [[ "${ENABLE_NODE_PROBLEM_DETECTOR}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/node-problem-detector ${addon_dir}/node-problem-detector
fi
}

function configure-master-components() {
Expand All @@ -169,8 +173,8 @@ function configure-master-components() {
configure-kube-apiserver
configure-kube-scheduler
configure-kube-controller-manager
configure-addon-manager
configure-master-addons
configure-addon-manager
}

# TODO(yifan): Merge this with mount-master-pd() in configure-vm.sh
Expand Down Expand Up @@ -297,9 +301,10 @@ function load-docker-images() {
}


# TODO(yifan): Making this function more generic for other runtimes.
function load-master-components-images() {
echo "Loading docker images for master components"
export RKT_BIN=/opt/rkt/rkt
export DOCKER2ACI_BIN=/opt/docker2aci/docker2aci
${SALT_DIR}/install.sh ${KUBE_BIN_TAR}
${SALT_DIR}/salt/kube-master-addons/kube-master-addons.sh

Expand Down
4 changes: 2 additions & 2 deletions cluster/gce/coreos/helper.sh
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ function create-node-instance-template() {
local template_name="$1"
create-node-template "$template_name" "${scope_flags}" \
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
"user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml" \
"user-data=${KUBE_ROOT}/cluster/gce/coreos/node-${CONTAINER_RUNTIME}.yaml" \
"configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh" \
"configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh" \
"cluster-name=${KUBE_TEMP}/cluster-name.txt"
Expand Down Expand Up @@ -65,7 +65,7 @@ function create-master-instance() {
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--can-ip-forward \
--metadata-from-file \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/coreos/master.yaml,configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh,configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt" \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/coreos/master-${CONTAINER_RUNTIME}.yaml,configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh,configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh,cluster-name=${KUBE_TEMP}/cluster-name.txt" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE:-10}" \
${preemptible_master}
Expand Down
87 changes: 23 additions & 64 deletions cluster/gce/coreos/kube-manifests/addons/dns/skydns-rc.yaml
Original file line number Diff line number Diff line change
@@ -1,61 +1,34 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v11
name: kube-dns-v14
namespace: kube-system
labels:
k8s-app: kube-dns
version: v11
version: v14
kubernetes.io/cluster-service: "true"
spec:
replicas: ${DNS_REPLICAS}
selector:
k8s-app: kube-dns
version: v11
version: v14
template:
metadata:
labels:
k8s-app: kube-dns
version: v11
version: v14
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: etcd
image: gcr.io/google_containers/etcd-amd64:2.2.1
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.3
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 500Mi
requests:
cpu: 100m
memory: 50Mi
command:
- /usr/local/bin/etcd
- -data-dir
- /var/etcd/data
- -listen-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -advertise-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -initial-cluster-token
- skydns-etcd
volumeMounts:
- name: etcd-storage
mountPath: /var/etcd/data
- name: kube2sky
image: gcr.io/google_containers/kube2sky:1.15
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
# Kube2sky watches all pods.
memory: 200Mi
requests:
cpu: 100m
Expand All @@ -78,32 +51,23 @@ spec:
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30
timeoutSeconds: 5
command:
- /kube2sky
args:
# command = "/kube2sky"
- --domain=${DNS_DOMAIN}
- name: skydns
image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 200Mi
requests:
cpu: 100m
memory: 50Mi
command:
- /skydns
# command = "/kube-dns"
- --domain=${DNS_DOMAIN}.
- --dns-port=10053
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/dnsmasq:1.1
args:
# command = "/skydns"
- -machines=http://127.0.0.1:4001
- -addr=0.0.0.0:53
- -ns-rotate=false
- -domain=${DNS_DOMAIN}.
- --cache-size=1000
- --no-resolv
- --server=127.0.0.1#10053
ports:
- containerPort: 53
name: dns
Expand All @@ -112,7 +76,7 @@ spec:
name: dns-tcp
protocol: TCP
- name: healthz
image: gcr.io/google_containers/exechealthz:1.0
image: gcr.io/google_containers/exechealthz-amd64:1.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:
Expand All @@ -121,15 +85,10 @@ spec:
requests:
cpu: 10m
memory: 20Mi
command:
- /exechealthz
args:
- -cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null
- -port=8080
ports:
- containerPort: 8080
protocol: TCP
volumes:
- name: etcd-storage
emptyDir: {}
dnsPolicy: Default # Don't use cluster DNS.
dnsPolicy: Default # Don't use cluster DNS.
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:1.8
- image: gcr.io/google_containers/elasticsearch:1.9
name: elasticsearch-logging
resources:
# keep request = limit to keep this container in guaranteed class
Expand All @@ -40,4 +40,4 @@ spec:
mountPath: /data
volumes:
- name: es-persistent-storage
emptyDir: {}
emptyDir: {}
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: node-problem-detector-v0.1
namespace: kube-system
labels:
k8s-app: node-problem-detector
version: v0.1
kubernetes.io/cluster-service: "true"
spec:
template:
metadata:
labels:
k8s-app: node-problem-detector
version: v0.1
kubernetes.io/cluster-service: "true"
spec:
hostNetwork: true
containers:
- name: node-problem-detector
image: gcr.io/google_containers/node-problem-detector:v0.1
env:
# Config the host ip and port of apiserver.
- name: "KUBERNETES_SERVICE_HOST"
value: ${INSTANCE_PREFIX}-master
- name: "KUBERNETES_SERVICE_PORT"
value: "443"
securityContext:
privileged: true
resources:
limits:
cpu: "200m"
memory: "100Mi"
requests:
cpu: "20m"
memory: "20Mi"
volumeMounts:
- name: log
mountPath: /log
readOnly: true
volumes:
- name: log
hostPath:
path: /var/log/
2 changes: 1 addition & 1 deletion cluster/gce/coreos/kube-manifests/etcd-events.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ spec:
--bind-addr=127.0.0.1:4002
--data-dir=/var/etcd/data-events
1>>/var/log/etcd-events.log 2>&1
image: gcr.io/google_containers/etcd:2.0.12
image: gcr.io/google_containers/etcd:2.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
Expand Down
4 changes: 3 additions & 1 deletion cluster/gce/coreos/kube-manifests/kube-addon-manager.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,9 @@ spec:
resources:
requests:
cpu: 5m
memory: 50Mi
# TODO(yifan): Figure out what's the memory usage should be here.
# See https://github.com/kubernetes/kubernetes/issues/23641.
memory: 100Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,27 +35,9 @@ coreos:
Type=oneshot
RemainAfterExit=yes
ExecStartPre=/usr/bin/mkdir -p /opt/cni
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/cni.tar.gz https://storage.googleapis.com/kubernetes-release/network-plugins/cni-42c4cb842dad606a84e93aad5a4484ded48e3046.tar.gz
ExecStartPre=/usr/bin/curl --fail --silent --show-error --location --create-dirs --output /opt/downloads/cni.tar.gz https://storage.googleapis.com/kubernetes-release/network-plugins/cni-c864f0e1ea73719b8f4582402b0847064f9883b0.tar.gz
ExecStart=/usr/bin/tar xf /opt/downloads/cni.tar.gz -C /opt/cni/

- name: kubernetes-install-rkt.service
command: start
content: |
[Unit]
Description=Fetch rkt
Documentation=http://github.com/coreos/rkt
Requires=network-online.target
After=network-online.target
Requires=kube-env.service
After=kube-env.service
[Service]
Type=oneshot
RemainAfterExit=yes
EnvironmentFile=/etc/kube-env
ExecStartPre=/usr/bin/mkdir -p /etc/rkt /opt/downloads /opt/rkt/
ExecStartPre=/usr/bin/curl --fail --silent --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
ExecStart=/usr/bin/tar --strip-components=1 -xf /opt/downloads/rkt.tar.gz -C /opt/rkt/ --overwrite

- name: kubernetes-download-salt.service
command: start
content: |
Expand Down Expand Up @@ -139,7 +121,7 @@ coreos:
--cluster-dns=${DNS_SERVER_IP} \
--cluster-domain=${DNS_DOMAIN} \
--logtostderr=true \
--container-runtime=${KUBERNETES_CONTAINER_RUNTIME} \
--container-runtime=docker \
--rkt-path=/opt/rkt/rkt \
--rkt-stage1-image=/opt/rkt/stage1-coreos.aci \
--configure-cbr0=${KUBERNETES_CONFIGURE_CBR0} \
Expand All @@ -149,7 +131,7 @@ coreos:
Restart=always
RestartSec=10
KillMode=process

- name: docker.service
drop-ins:
- name: 50-docker-opts.conf
Expand All @@ -171,8 +153,6 @@ coreos:
Description=Configure Node For Kubernetes service
Requires=kubernetes-install-node.service
After=kubernetes-install-node.service
Requires=kubernetes-install-rkt.service
After=kubernetes-install-rkt.service
Requires=kubernetes-download-salt.service
After=kubernetes-download-salt.service
Requires=kubernetes-download-manifests.service
Expand Down