Adds the worker. Supporting files for services #19

Merged
merged 4 commits into from Sep 16, 2016
@@ -24,7 +24,7 @@ def install():
# Get the resource via resource_get
archive = hookenv.resource_get('kubernetes')
if not archive:
- hookenv.status_set('blocked', 'Missing kubernetes binary package')
+ hookenv.status_set('blocked', 'Missing kubernetes resource')
return
hookenv.status_set('maintenance', 'Unpacking Kubernetes.')
@@ -74,8 +74,15 @@ def setup_authentication():
set_state('authentication.setup')
@when('kube_master_components.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    # `kube-apiserver --version` is expected to emit a single line such as
    # "Kubernetes v1.3.6" — TODO confirm this format holds across releases.
    # We take the last space-delimited token and strip the trailing newline;
    # if the output format ever changes, this split/rstrip is what breaks.
    version = check_output(['kube-apiserver', '--version'])
    hookenv.application_version_set(version.split(b' ')[-1].rstrip())
@mbruzek

mbruzek Sep 16, 2016

Include a comment with the current version output so if this split/rstrip ever breaks we can understand what changed.

+
+
# @when('k8s.certificate.authority available')
-@when('etcd.available')
+@when('etcd.available', 'kube_master_components.installed')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
@@ -90,10 +97,11 @@ def start_master(etcd):
for service in services:
if start_service(service):
set_state('{0}.available'.format(service))
+ hookenv.open_port(8080)
hookenv.status_set('active', 'Kubernetes master running.')
-@when('apiserver.available')
+@when('apiserver.available', 'kube_master_components.installed')
@when_not('kube-dns.available')
def launch_dns():
'''Create the "kube-system" namespace, the kubedns resource controller, and
@@ -135,9 +143,13 @@ def launch_dns():
# TODO: This needs a much better relationship name...
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
    ''' Send configuration to the load balancer, and close access to the
    public interface '''
    # A load balancer now fronts the apiserver, so stop exposing port 8080
    # directly on this unit before handing the backend port to the relation.
    hookenv.close_port(8080)
    kube_api.configure(port=8080)
+@when('kube_master_components.installed')
@when_not('kubernetes.dashboard.available')
def launch_kubernetes_dashboard():
''' Launch the Kubernetes dashboard. If not enabled, attempt deletion '''

Large diffs are not rendered by default.

Oops, something went wrong.
@@ -0,0 +1,7 @@
+includes:
+ - 'layer:basic'
+ - 'layer:docker'
+ - 'interface:http'
+ - 'interface:sdn-plugin'
+repo: https://github.com/kubernetes/kubernetes
+
@@ -0,0 +1,28 @@
+name: kubernetes-worker
+summary: The workload bearing units of a kubernetes cluster
+maintainers:
+ - Charles Butler <charles.butler@canonical.com>
+ - Matthew Bruzek <matthew.bruzek@canonical.com>
+description: |
+  Kubernetes is an open-source platform for deploying, scaling, and operations
+  of application containers across a cluster of hosts. Kubernetes is portable
+ in that it works with public, private, and hybrid clouds. Extensible through
+ a pluggable infrastructure. Self healing in that it will automatically
+ restart and place containers on healthy nodes if a node ever goes away.
+tags:
+ - misc
+series:
+ - xenial
+subordinate: false
+provides:
+ sdn-plugin:
+ interface: sdn-plugin
+ scope: container
+requires:
+ kube-api-endpoint:
+ interface: http
+resources:
+ kubernetes:
+ type: file
+ filename: kubernetes.tar.gz
+ description: "A tarball packaged release of the kubernetes bins."
@@ -0,0 +1,115 @@
+
+from charms.reactive import when, when_not, set_state
+from charms.docker import DockerOpts
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+from charmhelpers.fetch import apt_install
+from charms.templating.jinja2 import render
+
+import os
+import subprocess
+
+
def _reconfigure_docker_for_sdn():
    ''' Tear down the stock docker0 bridge so the docker engine can be
    restarted using the flannel (SDN) networking bridge instead of its
    default container network. '''

    hookenv.status_set('maintenance', 'Reconfiguring docker network bridge')
    host.service_stop('docker')
    apt_install(['bridge-utils'], fatal=True)

    # ifconfig doesn't always work, so use the native linux networking
    # commands: mark the bridge inactive, then delete it.
    for teardown in (['ip', 'link', 'set', 'docker0', 'down'],
                     ['brctl', 'delbr', 'docker0']):
        subprocess.check_call(teardown)

    set_state('docker.restart')
+
+
+@when('docker.available')
+@when_not('kubernetes.worker.bins.installed')
@mbruzek

mbruzek Sep 16, 2016

You made the status message the same with the master, at some point we should make the states similar. 'kubernetes.worker.bins.installed' -> 'kubernetes_worker_components.installed' ... I don't really care the name just so that the two charms use similar terminology.

def install_kubernetes_components():
    ''' Unpack the kubernetes worker binaries from the charm resource and
    install kubelet and kube-proxy into /usr/local/bin. Sets the
    kubernetes.worker.bins.installed state on success. '''
    kube_package = hookenv.resource_get('kubernetes')
    charm_dir = os.getenv('CHARM_DIR')

    if not kube_package:
        # Without the resource there is nothing to install; surface that to
        # the operator and wait for `juju attach` to provide it.
        hookenv.status_set('blocked', 'Missing kubernetes resource')
        return

    hookenv.status_set('maintenance', 'Unpacking kubernetes')

    unpack_path = '{}/files/kubernetes'.format(charm_dir)
    os.makedirs(unpack_path, exist_ok=True)
    cmd = ['tar', 'xfz', kube_package, '-C', unpack_path]
    subprocess.check_call(cmd)

    services = ['kubelet', 'kube-proxy']

    for service in services:
        unpacked = '{}/files/kubernetes/{}'.format(charm_dir, service)
        app_path = '/usr/local/bin/{}'.format(service)
        install = ['install', '-v', unpacked, app_path]
        # check_call, not call: a failed install must raise rather than be
        # silently ignored, otherwise the state below would be set even
        # though the binaries are missing.
        subprocess.check_call(install)

    set_state('kubernetes.worker.bins.installed')
+
+
@when('kubernetes.worker.bins.installed')
def set_app_version():
    ''' Declare the application version to juju '''
    # `kubelet --version` is expected to emit a single line such as
    # "Kubernetes v1.3.6" — TODO confirm this format holds across releases.
    # We take the last space-delimited token and strip the trailing newline;
    # if the output format ever changes, this split/rstrip is what breaks.
    cmd = ['kubelet', '--version']
    version = subprocess.check_output(cmd)
    hookenv.application_version_set(version.split(b' ')[-1].rstrip())
+
+
@when('sdn-plugin.available', 'docker.available')
@when_not('sdn.configured')
def container_sdn_setup(sdn):
    ''' Receive the information from the SDN plugin, and render the docker
    engine options. '''
    hookenv.status_set('maintenance', 'Configuring docker for sdn')
    plugin_data = sdn.get_sdn_config()

    # Point the docker bridge ip and mtu at what the SDN plugin handed us.
    engine_opts = DockerOpts()
    engine_opts.add('bip', plugin_data['subnet'])
    engine_opts.add('mtu', plugin_data['mtu'])

    defaults_path = '/etc/default/docker'
    with open(defaults_path, 'w') as defaults_file:
        defaults_file.write('DOCKER_OPTS="{}"'.format(engine_opts.to_s()))
    _reconfigure_docker_for_sdn()
    set_state('sdn.configured')
+
+
@when('kube-api-endpoint.available', 'kubernetes.worker.bins.installed')
def render_init_scripts(kube_api_endpoint):
    ''' We have related to either an api server or a load balancer connected
    to the apiserver. Render the config files and prepare for launch '''
    context = {}
    context.update(hookenv.config())
    hosts = []
    for serv in kube_api_endpoint.services():
        for unit in serv['hosts']:
            hosts.append('http://{}:{}'.format(unit['hostname'], unit['port']))
    # Log (rather than print) the endpoint list so it lands in the juju unit
    # log, where it can be correlated with the rendered configuration.
    hookenv.log('Kubernetes API endpoints: {}'.format(hosts))
    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    context.update({'kube_api_endpoint': ','.join(hosts),
                    'JUJU_UNIT_NAME': unit_name})

    os.makedirs('/var/lib/kubelet', exist_ok=True)
    render('kubelet-kubeconfig', '/etc/kubernetes/kubelet/kubeconfig', context)
    render('kube-default', '/etc/default/kube-default', context)
    render('kubelet.defaults', '/etc/default/kubelet', context)
    render('kube-proxy.service', '/lib/systemd/system/kube-proxy.service',
           context)
    render('kubelet.service', '/lib/systemd/system/kubelet.service', context)

    # Pick up the freshly rendered unit files before restarting.
    cmd = ['systemctl', 'daemon-reload']
    subprocess.check_call(cmd)

    host.service_restart('kubelet')
    host.service_restart('kube-proxy')
    hookenv.status_set('active', 'Worker ready')
@@ -0,0 +1,22 @@
+###
+# kubernetes system config
+#
+# The following values are used to configure various aspects of all
+# kubernetes services, including
+#
+# kube-apiserver.service
+# kube-controller-manager.service
+# kube-scheduler.service
+# kubelet.service
+# kube-proxy.service
+# logging to stderr means we get it in the systemd journal
+KUBE_LOGTOSTDERR="--logtostderr=true"
+
+# journal message level, 0 is debug
+KUBE_LOG_LEVEL="--v=0"
+
+# Should this cluster be allowed to run privileged docker containers
+KUBE_ALLOW_PRIV="--allow-privileged=false"
+
+# How the controller-manager, scheduler, and proxy find the apiserver
+KUBE_MASTER="--master={{ kube_api_endpoint }}"
@@ -0,0 +1,19 @@
+
+[Unit]
+Description=Kubernetes Kube-Proxy Server
+Documentation=http://kubernetes.io/docs/admin/kube-proxy/
+After=network.target
+
+[Service]
+EnvironmentFile=-/etc/default/kube-default
+EnvironmentFile=-/etc/default/kube-proxy
+ExecStart=/usr/local/bin/kube-proxy \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBE_MASTER \
+ $KUBE_PROXY_ARGS
+Restart=on-failure
+LimitNOFILE=65536
+
+[Install]
+WantedBy=multi-user.target
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: Config
+clusters:
+- name: local
+ cluster:
+ {% if ssl_path %}
+ certificate-authority: {{ ssl_path }}/ca.crt
+ server: {{ kube_api_endpoint }}
+ {% else %}
+ server: {{ kube_api_endpoint }}
+ {% endif %}
+users:
+- name: kubelet
+ user:
+ {% if ssl_path %}
+ client-certificate: {{ ssl_path }}/worker.crt
+ client-key: {{ ssl_path }}/worker-key.key
+ {% endif %}
+contexts:
+- context:
+ cluster: local
+ user: kubelet
+ name: kubelet-context
+current-context: kubelet-context
@@ -0,0 +1,16 @@
+# kubernetes kubelet (node) config
+
+# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
+KUBELET_ADDRESS="--address=127.0.0.1"
+
+# The port for the info server to serve on
+# KUBELET_PORT="--port=10250"
+
+# You may leave this blank to use the actual hostname
+KUBELET_HOSTNAME="--hostname-override={{ JUJU_UNIT_NAME }}"
+
+# location of the api-server
+KUBELET_API_SERVER="--api-servers={{ kube_api_endpoint }}"
+
+# Add your own!
+KUBELET_ARGS=""
@@ -0,0 +1,24 @@
+[Unit]
+Description=Kubernetes Kubelet Server
+Documentation=http://kubernetes.io/docs/admin/kubelet/
+After=docker.service
+Requires=docker.service
+
+[Service]
+WorkingDirectory=/var/lib/kubelet
+EnvironmentFile=-/etc/default/kube-default
+EnvironmentFile=-/etc/default/kubelet
+ExecStart=/usr/local/bin/kubelet \
+ $KUBE_LOGTOSTDERR \
+ $KUBE_LOG_LEVEL \
+ $KUBELET_API_SERVER \
+ $KUBELET_ADDRESS \
+ $KUBELET_PORT \
+ $KUBELET_HOSTNAME \
+ $KUBE_ALLOW_PRIV \
+ $KUBELET_ARGS
+Restart=on-failure
+KillMode=process
+
+[Install]
+WantedBy=multi-user.target
@@ -0,0 +1,31 @@
+#!/usr/bin/python3
+
+import amulet
+import requests
+import unittest
+
+
class TestCharm(unittest.TestCase):
    ''' Amulet deployment test for the kubernetes-worker charm. '''

    def setUp(self):
        self.d = amulet.Deployment()

        self.d.add('kubernetes-worker')
        self.d.expose('kubernetes-worker')

        self.d.setup(timeout=900)
        self.d.sentry.wait()

        self.unit = self.d.sentry['kubernetes-worker'][0]

    def test_service(self):
        ''' The exposed worker should answer over http. '''
        page = requests.get('http://{}'.format(self.unit.info['public-address']))
        self.assertEqual(page.status_code, 200)

    def test_worker_daemons_running(self):
        ''' Per review: verify the worker services are actually running on
        the unit, not just that the deployment settled. '''
        for daemon in ('kubelet', 'kube-proxy'):
            # pgrep -f returns 0 only when a matching process exists.
            output, code = self.unit.run('pgrep -f {}'.format(daemon))
            self.assertEqual(code, 0,
                             '{} is not running: {}'.format(daemon, output))
@@ -0,0 +1,3 @@
+tests: "[a-zA-Z0-9]*"
+packages:
+ - amulet
@@ -0,0 +1 @@
+charms.templating.jinja2>=0.0.1,<2.0.0