Ubernetes Lite support for GCE #17919

Merged · 2 commits · Jan 21, 2016
6 changes: 3 additions & 3 deletions cluster/aws/util.sh
@@ -20,7 +20,7 @@
# The intent is to allow experimentation/advanced functionality before we
# are ready to commit to supporting it.
# Experimental functionality:
# KUBE_SHARE_MASTER=true
# KUBE_USE_EXISTING_MASTER=true
# Detect and reuse an existing master; useful if you want to
# create more nodes, perhaps with a different instance type or in
# a different subnet/AZ
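For context, a minimal sketch of how the renamed flag is exercised on AWS (the provider and zone values here are illustrative, not part of this diff):

    # Add nodes to a running cluster by reusing its master,
    # e.g. in a different AZ or with a different instance type.
    KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true \
      KUBE_AWS_ZONE=us-west-2b ./cluster/kube-up.sh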
@@ -808,8 +808,8 @@ function kube-up {
# HTTPS to the master is allowed (for API access)
authorize-security-group-ingress "${MASTER_SG_ID}" "--protocol tcp --port 443 --cidr 0.0.0.0/0"

# KUBE_SHARE_MASTER is used to add minions to an existing master
if [[ "${KUBE_SHARE_MASTER:-}" == "true" ]]; then
# KUBE_USE_EXISTING_MASTER is used to add minions to an existing master
if [[ "${KUBE_USE_EXISTING_MASTER:-}" == "true" ]]; then
# Detect existing master
detect-master

26 changes: 23 additions & 3 deletions cluster/gce/configure-vm.sh
@@ -586,22 +586,42 @@ grains:
- kubernetes-master
cloud: gce
EOF
if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then
cat <<EOF >/etc/gce.conf

cat <<EOF >/etc/gce.conf
[global]
EOF
CLOUD_CONFIG='' # Set to non-empty path if we are using the gce.conf file

if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
project-id = ${PROJECT_ID}
network-name = ${NODE_NETWORK}
EOF
CLOUD_CONFIG=/etc/gce.conf
EXTERNAL_IP=$(curl --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
cat <<EOF >>/etc/salt/minion.d/grains.conf
cloud_config: /etc/gce.conf
advertise_address: '${EXTERNAL_IP}'
proxy_ssh_user: '${PROXY_SSH_USER}'
EOF
fi

if [[ -n "${MULTIZONE:-}" ]]; then
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
CLOUD_CONFIG=/etc/gce.conf
fi

if [[ -n ${CLOUD_CONFIG:-} ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
cloud_config: ${CLOUD_CONFIG}
EOF
else
rm -f /etc/gce.conf
fi

# If the kubelet on the master is enabled, give it the same CIDR range
# as a generic node.
if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
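For reference, when TOKEN_URL, TOKEN_BODY, PROJECT_ID, and NODE_NETWORK are all set and MULTIZONE is enabled, the logic above assembles an /etc/gce.conf along these lines (all values are placeholders):

    [global]
    token-url = https://example.invalid/token
    token-body = '{"kind": "TokenRequest"}'
    project-id = my-gce-project
    network-name = my-network
    multizone = true

CLOUD_CONFIG then points the cloud_config Salt grain at that file; if neither block ran, the stub containing only [global] is removed instead.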
43 changes: 38 additions & 5 deletions cluster/gce/util.sh
@@ -575,6 +575,22 @@ function kube-up {
find-release-tars
upload-server-tars

if [[ ${KUBE_USE_EXISTING_MASTER:-} == "true" ]]; then
create-nodes
create-autoscaler
else
check-existing
create-network
create-master
create-nodes-firewall
create-nodes-template
create-nodes
create-autoscaler
check-cluster
fi
}

function check-existing() {
local running_in_terminal=false
# May be false if tty is not allocated (for example with ssh -T).
if [ -t 1 ]; then
@@ -595,7 +611,9 @@ function kube-up {
fi
fi
fi
}

function create-network() {
if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
echo "Creating new network: ${NETWORK}"
# The network needs to be created synchronously or we have a race. The
@@ -618,7 +636,9 @@ function kube-up {
--source-ranges "0.0.0.0/0" \
--allow "tcp:22" &
fi
}

function create-master() {
echo "Starting master and configuring firewalls"
gcloud compute firewall-rules create "${MASTER_NAME}-https" \
--project "${PROJECT}" \
@@ -663,7 +683,9 @@ function kube-up {
create-certs "${MASTER_RESERVED_IP}"

create-master-instance "${MASTER_RESERVED_IP}" &
}

function create-nodes-firewall() {
# Create a single firewall rule for all minions.
create-firewall-rule "${NODE_TAG}-all" "${CLUSTER_IP_RANGE}" "${NODE_TAG}" &

@@ -676,7 +698,9 @@ function kube-up {
kube::util::wait-for-jobs || {
echo -e "${color_red}${fail} commands failed.${color_norm}" >&2
}
}

function create-nodes-template() {
echo "Creating minions."

# TODO(zmerlynn): Refactor setting scope flags.
@@ -690,8 +714,12 @@ function kube-up {
write-node-env

local template_name="${NODE_INSTANCE_PREFIX}-template"

create-node-instance-template $template_name
}

function create-nodes() {
local template_name="${NODE_INSTANCE_PREFIX}-template"

local defaulted_max_instances_per_mig=${MAX_INSTANCES_PER_MIG:-500}

@@ -731,10 +759,9 @@ function kube-up {
"${NODE_INSTANCE_PREFIX}-group" \
--zone "${ZONE}" \
--project "${PROJECT}" || true;
}

detect-node-names
detect-master

function create-autoscaler() {
# Create autoscaler for nodes if requested
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
METRICS=""
@@ -764,6 +791,11 @@ function kube-up {
gcloud compute instance-groups managed set-autoscaling "${NODE_INSTANCE_PREFIX}-group" --zone "${ZONE}" --project "${PROJECT}" \
--min-num-replicas "${last_min_instances}" --max-num-replicas "${last_max_instances}" ${METRICS} || true
fi
}

function check-cluster() {
detect-node-names
detect-master

echo "Waiting up to ${KUBE_CLUSTER_INITIALIZATION_TIMEOUT} seconds for cluster initialization."
echo
@@ -845,7 +877,7 @@ function kube-down {
fi

# Get the name of the managed instance group template before we delete the
# managed instange group. (The name of the managed instnace group template may
# managed instance group. (The name of the managed instance group template may
# change during a cluster upgrade.)
local template=$(get-template "${PROJECT}" "${ZONE}" "${NODE_INSTANCE_PREFIX}-group")

@@ -1379,6 +1411,7 @@ OPENCONTRAIL_PUBLIC_SUBNET: $(yaml-quote ${OPENCONTRAIL_PUBLIC_SUBNET:-})
E2E_STORAGE_TEST_ENVIRONMENT: $(yaml-quote ${E2E_STORAGE_TEST_ENVIRONMENT:-})
KUBE_IMAGE_TAG: $(yaml-quote ${KUBE_IMAGE_TAG:-})
KUBE_DOCKER_REGISTRY: $(yaml-quote ${KUBE_DOCKER_REGISTRY:-})
MULTIZONE: $(yaml-quote ${MULTIZONE:-})
EOF
if [ -n "${KUBELET_PORT:-}" ]; then
cat >>$file <<EOF
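Taken together, the refactor gives kube-up two call paths. A sketch of the intended GCE multizone workflow (zone names are illustrative; KUBE_GCE_ZONE is the usual zone override in these scripts):

    # First zone: full bring-up (network, master, firewalls, nodes, autoscaler).
    MULTIZONE=true KUBE_GCE_ZONE=us-central1-a ./cluster/kube-up.sh
    # Each additional zone: reuse the existing master, create nodes only.
    MULTIZONE=true KUBE_USE_EXISTING_MASTER=true \
      KUBE_GCE_ZONE=us-central1-b ./cluster/kube-up.sh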
2 changes: 1 addition & 1 deletion docs/proposals/federation-lite.md
@@ -71,7 +71,7 @@ make multiple zones better supported.
For the initial implementation, kube-up must be run multiple times, once for
each zone. The first kube-up will take place as normal, but then for each
additional zone the user must run kube-up again, specifying
`KUBE_SHARE_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
`KUBE_USE_EXISTING_MASTER=true` and `KUBE_SUBNET_CIDR=172.20.x.0/24`. This will then
create additional nodes in a different zone, but will register them with the
existing master.

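A minimal sketch of that workflow (the second CIDR follows the doc's 172.20.x.0/24 pattern; all values are examples):

    # Zone 1: normal bring-up.
    KUBERNETES_PROVIDER=aws ./cluster/kube-up.sh
    # Zones 2..n: register new nodes with the existing master.
    KUBERNETES_PROVIDER=aws KUBE_USE_EXISTING_MASTER=true \
      KUBE_SUBNET_CIDR=172.20.1.0/24 ./cluster/kube-up.sh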
2 changes: 1 addition & 1 deletion hack/verify-flags/exceptions.txt
@@ -6,7 +6,7 @@ cluster/aws/templates/salt-minion.sh:# We set the hostname_override to the full
cluster/centos/util.sh: local node_ip=${node#*@}
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh: cloud_config: /etc/gce.conf
cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
cluster/gce/coreos/helper.sh:# cloud_config yaml file should be passed
cluster/gce/util.sh: local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
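This exceptions file is consumed by the flag-naming verifier, so the updated cloud_config entry has to match the script's new output exactly; after editing it, the check would be re-run (a sketch, assuming the verifier entry point that accompanies this file):

    hack/verify-flags-underscore.py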