move vagrant to masterless salt #17977

Merged (1 commit) on Dec 2, 2015
4 changes: 3 additions & 1 deletion cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest
@@ -9,7 +9,9 @@
{% set cloud_config_volume = "" -%}

{% if grains.cloud is defined -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% if grains.cloud != 'vagrant' -%}
Member: Why the special case? Vagrant uses the vagrant cloud provider.

Member: Well, it looks like you removed that... I wonder why.

Member Author: The only feature the vagrant cloud provider had was listing instances through the salt master, which is incompatible with masterless salt.

https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vagrant/vagrant.go#L37

I'm still trying to figure out what to do with the vagrant cloud provider, or whether it's needed at all.

Member: I am still getting out of my turkey coma; it's possible we can remove the vagrant cloud provider, I just haven't thought it through.
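
For context on the instance-listing feature under discussion: the vagrant cloud provider talked to the salt REST API that this PR also deletes from provision-master.sh (the external_auth/rest_cherrypy block further down). A rough sketch of that style of query, assuming the deleted config's port and PAM credentials; the exact request shape in vagrant.go may differ:

# Log in to salt-api as the vagrant PAM user to obtain a session token.
TOKEN=$(curl -sS "http://${MASTER_IP}:8000/login" \
  -H 'Accept: application/json' \
  -d username=vagrant -d password=vagrant -d eauth=pam |
  python -c 'import json,sys; print(json.load(sys.stdin)["return"][0]["token"])')

# Ask the master to run grains.items on every minion -- the kind of call
# that could enumerate instances only while a salt master existed.
curl -sS "http://${MASTER_IP}:8000" \
  -H 'Accept: application/json' -H "X-Auth-Token: ${TOKEN}" \
  -d client=local -d tgt='*' -d fun=grains.items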

{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}

{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
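In effect, the guard drops --cloud-provider from the rendered command line whenever the cloud grain is vagrant. A bash transliteration of the template logic, purely illustrative (grains_cloud stands in for grains.cloud):

# gce/aws: flag present; vagrant: flag omitted entirely
grains_cloud="vagrant"
cloud_provider=""
if [[ -n "${grains_cloud}" && "${grains_cloud}" != "vagrant" ]]; then
  cloud_provider="--cloud-provider=${grains_cloud}"
fi
echo "kube-apiserver ${cloud_provider}"   # prints no flag for vagrant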
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest
@@ -27,7 +27,9 @@
{% set cloud_config_volume = "" -%}

{% if grains.cloud is defined -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% if grains.cloud != 'vagrant' -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set service_account_key = " --service-account-private-key-file=/srv/kubernetes/server.key " -%}

{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
1 change: 1 addition & 0 deletions cluster/saltbase/salt/kube-node-unpacker/init.sls
@@ -1,6 +1,7 @@
/etc/kubernetes/kube-node-unpacker.sh:
file.managed:
- source: salt://kube-node-unpacker/kube-node-unpacker.sh
- makedirs: True
- user: root
- group: root
- mode: 755
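
The added makedirs: True tells file.managed to create any missing parent directories before writing the file; roughly the shell equivalent of (illustrative only):

mkdir -p /etc/kubernetes
install -m 0755 -o root -g root kube-node-unpacker.sh /etc/kubernetes/kube-node-unpacker.sh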
2 changes: 1 addition & 1 deletion cluster/saltbase/salt/kubelet/default
@@ -46,7 +46,7 @@
{% endif -%}

{% set cloud_provider = "" -%}
{% if grains.cloud is defined -%}
{% if grains.cloud is defined and grains.cloud != 'vagrant' -%}
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}

242 changes: 10 additions & 232 deletions cluster/vagrant/provision-master.sh
@@ -14,8 +14,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# exit on any error
set -e
set -o errexit
set -o nounset
set -o pipefail
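
The replacement flags are stricter than plain set -e: nounset aborts on undefined variables and pipefail propagates a failure from any pipeline stage. A contrived demonstration (not from the PR):

set -o errexit -o nounset -o pipefail
false | true            # under pipefail the pipeline exits non-zero, so errexit stops the script here
echo "${NOT_DEFINED}"   # never reached; nounset would abort on the undefined variable anyway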

# Set the host name explicitly
# See: https://github.com/mitchellh/vagrant/issues/2430
@@ -47,26 +48,6 @@ function release_not_found() {
exit 1
}

# Look for our precompiled binary releases. When running from a source repo,
# these are generated under _output. When running from an release tarball these
# are under ./server.
server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$server_binary_tar" ]]; then
server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$server_binary_tar" ]]; then
release_not_found
fi

salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
if [[ ! -f "$salt_tar" ]]; then
salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$salt_tar" ]]; then
release_not_found
fi


# Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
minion=${NODE_NAMES[$i]}
@@ -82,87 +63,7 @@ echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
# Configure the master network
provision-network-master
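
After the loop and the echo above, /etc/hosts carries one entry per cluster member, along these lines (hypothetical names and addresses, for illustration only):

10.245.1.3 minion-1
10.245.1.4 minion-2
10.245.1.2 kubernetes-master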

# Update salt configuration
mkdir -p /etc/salt/minion.d
cat <<EOF >/etc/salt/minion.d/master.conf
master: '$(echo "$MASTER_NAME" | sed -e "s/'/''/g")'
auth_timeout: 10
auth_tries: 2
auth_safemode: True
ping_interval: 1
random_reauth_delay: 3
state_aggregrate:
- pkg
EOF

cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
publicAddressOverride: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
network_mode: openvswitch
networkInterfaceName: '$(echo "$NETWORK_IF_NAME" | sed -e "s/'/''/g")'
api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cloud: vagrant
roles:
- kubernetes-master
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
master_extra_sans: '$(echo "$MASTER_EXTRA_SANS" | sed -e "s/'/''/g")'
keep_host_etcd: true
EOF

mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG" | sed -e "s/'/''/g")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET" | sed -e "s/'/''/g")'
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
EOF

# Configure the salt-master
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
open_mode: True
auto_accept: True
EOF

cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
- 'salt/minion/*/start':
- /srv/reactor/highstate-new.sls
EOF

cat <<EOF >/etc/salt/master.d/salt-output.conf
# Minimize the amount of output to terminal
state_verbose: False
state_output: mixed
log_level: debug
log_level_logfile: debug
EOF

cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF

write-salt-config kubernetes-master
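
write-salt-config is a helper introduced elsewhere in this PR (its definition is not part of this diff), replacing the deleted heredocs above. Presumably it writes the grains plus a masterless minion config; a sketch of what such a config looks like, based on standard Salt masterless setup rather than on the helper's actual body:

cat <<EOF >/etc/salt/minion.d/local.conf
# Masterless mode: salt-call reads states from the local filesystem
# instead of requesting them from a salt master.
file_client: local
EOF

cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  cloud: vagrant
  roles:
    - kubernetes-master
EOF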

# Generate and distribute a shared secret (bearer token) to
# apiserver and kubelet so that kubelet can authenticate to
@@ -179,57 +80,9 @@ if [[ ! -f "${known_tokens_file}" ]]; then
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask u=rw,go= ; echo "{\"BearerToken\": \"$KUBELET_TOKEN\", \"Insecure\": true }" > $kubelet_auth_file)
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"

mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
users:
- name: kubelet
user:
token: ${KUBELET_TOKEN}
EOF
)


mkdir -p /srv/salt-overlay/salt/kube-proxy
kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
# Make a kubeconfig file with the token.
# TODO(etune): put apiserver certs into secret too, and reference from authfile,
# so that "Insecure" is not needed.
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" << EOF
apiVersion: v1
kind: Config
clusters:
- cluster:
insecure-skip-tls-verify: true
name: local
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
EOF
)

create-salt-kubelet-auth
create-salt-kubeproxy-auth
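
These two helpers also live in the shared cluster scripts (not shown in this diff); presumably they generate the same token-based kubeconfigs that the deleted heredocs produced. A hypothetical expectation, mirroring the paths the deleted code wrote by hand:

create-salt-kubelet-auth      # -> /srv/salt-overlay/salt/kubelet/kubeconfig
create-salt-kubeproxy-auth    # -> /srv/salt-overlay/salt/kube-proxy/kubeconfig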
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
@@ -250,94 +103,19 @@ if [ ! -e "${BASIC_AUTH_FILE}" ]; then
echo "${MASTER_USER},${MASTER_PASSWD},admin" > "${BASIC_AUTH_FILE}")
fi

echo "Running release install script"
rm -rf /kube-install
mkdir -p /kube-install
pushd /kube-install
tar xzf "$salt_tar"
cp "$server_binary_tar" .
./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
popd

# Enable Fedora Cockpit on host to support Kubernetes administration
# Access it by going to <master-ip>:9090 and login as vagrant/vagrant
if ! which /usr/libexec/cockpit-ws &>/dev/null; then

pushd /etc/yum.repos.d
wget https://copr.fedoraproject.org/coprs/sgallagh/cockpit-preview/repo/fedora-21/sgallagh-cockpit-preview-fedora-21.repo
yum install -y cockpit cockpit-kubernetes
popd

systemctl enable cockpit.socket
systemctl start cockpit.socket
fi

# we will run provision to update code each time we test, so we do not want to do salt installs each time
if ! which salt-master &>/dev/null; then

# Configure the salt-api
cat <<EOF >/etc/salt/master.d/salt-api.conf
# Set vagrant user as REST API user
external_auth:
pam:
vagrant:
- .*
rest_cherrypy:
port: 8000
host: ${MASTER_IP}
disable_ssl: True
webhook_disable_auth: True
EOF
install-salt

# Install Salt Master
#
# -M installs the master
# -N does not install the minion
curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -N

# Install salt-api
#
# This is used to provide the network transport for salt-api
yum install -y python-cherrypy
# This is used to inform the cloud provider used in the vagrant cluster
yum install -y salt-api
# Set log level to a level higher than "info" to prevent the message about
# enabling the service (which is not an error) from being printed to stderr.
SYSTEMD_LOG_LEVEL=notice systemctl enable salt-api
systemctl start salt-api
fi

if ! which salt-minion >/dev/null 2>&1; then

# Install Salt minion
curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s

# Edit the Salt minion unit file to restart always,
# needed because vagrant uses the minion as the basis for registering nodes in the cloud provider.
# Set OOMScoreAdjust to -999 to prevent the minion from being killed along with salt-master,
# which would make the kubelet NotReady because it's no longer found by the salt cloud provider call.
cat <<EOF >/usr/lib/systemd/system/salt-minion.service
[Unit]
Description=The Salt Minion
After=syslog.target network.target

[Service]
Type=simple
ExecStart=/usr/bin/salt-minion
Restart=Always
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl restart salt-minion.service

else
# Only run highstate when updating the config. In the first-run case, Salt is
# set up to run highstate as new minions join for the first time.
echo "Executing configuration"
salt '*' mine.update
salt --force-color '*' state.highstate
fi
run-salt
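
run-salt is likewise defined in the shared vagrant util scripts rather than in this diff. With the master gone, applying the highstate presumably comes down to a local salt-call, along these lines (a sketch of the masterless invocation, not the helper's actual body):

# Apply the full highstate from the local file roots; no master involved.
salt-call --local state.highstate --log-level=debug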