diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..bb1f04e
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,28 @@
+#!/usr/bin/make
+
+charm_dir := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+all: lint unit_test build
+
+clean:
+ rm -rf $(charm_dir).tox
+ rm -rf $(charm_dir).cache
+ rm -rf $(charm_dir).unit-state.db
+ rm -rf $(charm_dir)*/__pycache__
+
+lint:
+ tox -c $(charm_dir)tox.ini -e lint
+
+unit_test:
+ifdef VERBOSE
+ tox -c $(charm_dir)tox.ini -- -v -s
+else
+ tox -c $(charm_dir)tox.ini
+endif
+
+build: clean
+ifdef FORCE
+ charm build $(charm_dir) --force
+else
+ charm build $(charm_dir)
+endif
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..65da170
--- /dev/null
+++ b/README.md
@@ -0,0 +1,237 @@
+# Overview
+
+The Elastisys CharmScaler is an autoscaler for Juju applications. It
+automatically scales your charm by adding units at times of high load and by
+removing units at times of low load.
+
+The initial edition of the CharmScaler features a simplified version of
+[Elastisys](https://elastisys.com)' autoscaling engine (described below),
+without its
+[_predictive capabilities_](https://elastisys.com/cloud-platform-features/predictive-auto-scaling/)
+and with limited scaling metric support. Work is underway on a more
+fully-featured CharmScaler, but no release date has been set yet.
+
+The initial CharmScaler edition scales the number of units of your applications
+based on the observed CPU usage. These CPU metrics are collected from your
+application by a [telegraf](https://jujucharms.com/telegraf/) agent, which
+pushes the metrics into an
+[InfluxDB](https://jujucharms.com/u/chris.macnaughton/influxdb/) backend, from
+where they are consumed by the CharmScaler.
+
+The CharmScaler is available both free-of-charge and as a subscription service.
+The free version comes with a size restriction which currently limits the size
+of the scaled application to four units. Subscription users will see no such
+size restrictions. For more details, refer to the [Subscription](#subscription)
+section below.
+
+If you are eager to try out the CharmScaler, head directly to the
+[Quickstart](#quickstart) section. If you want to learn more about the
+Elastisys autoscaler, read on ...
+
+# Introducing the Elastisys Autoscaler
+
+User experience is king. You want to offer your users a smooth ride. From a
+performance perspective, this translates into providing them with a responsive
+service. As response times increase you will see more and more users leaving,
+perhaps for competing services.
+
+An application can be tuned in many ways, but one critical aspect is to make
+sure that it runs on sufficient hardware, capable of bearing the weight that is
+placed on your system. However, resource planning is notoriously hard and
+involves a lot of guesswork. A fixed "peak-dimensioned" infrastructure is
+certain to have you overspending most of the time and, what's worse, you can
+never be sure that it actually will be able to handle the next load surge.
+Ideally, you want to run with just the right amount of resources at all times.
+Keeping capacity right-sized by hand, however, involves a lot of planning and
+manual labor.
+
+Elastisys automates this process with a sophisticated autoscaler. The Elastisys
+autoscaler uses proactive scaling algorithms based on state-of-the-art
+research, which predictively offer _just-in-time capacity_. That is, it can
+provision servers in advance so that the right amount of capacity is available
+_when it is needed_, not when you _realize_ that it's needed (by then your
+application may already be suffering). Research has shown that there is no
+single scaling algorithm to rule them all. Different workload patterns require
+different algorithms. The Elastisys autoscaler is armed with a growing
+collection of such algorithms.
+
+The Elastisys autoscaler already supports a
+[wide range of clouds and platforms](https://github.com/elastisys/scale.cloudpool).
+With the addition of the Juju CharmScaler, which can scale any Juju
+application charm, integration with your application has never been easier.
+Whether it’s a Wordpress site, a Hadoop cluster, a Kubernetes cluster,
+OpenStack compute nodes, or your own custom-made application charm, hooking it
+up to be scaled by the Elastisys autoscaler is really easy.
+
+Read more about Elastisys' cloud automation platform at
+[https://elastisys.com](https://elastisys.com).
+
+# Subscription
+
+The free edition limits the size of the scaled application to four units. To
+remove this restriction you need to become a paying subscription
+user. Juju is currently in beta, and does not yet support commercial charms.
+Once Juju is officially released, the CharmScaler will be available as a
+subscription service. Until then, you can contact us and we will help you set
+up a temporary subscription arrangement.
+
+To upgrade to a premium subscription, to request a customized solution, or
+for general questions and feature requests, feel free to contact Elastisys at
+[contact@elastisys.com](mailto:contact@elastisys.com).
+
+# Quickstart
+
+If you can't wait to get started, the following minimal example (relying on
+configuration defaults) will let you start scaling your charm right away. For a
+description of the CharmScaler and further details on its configuration, refer
+to the sections below.
+
+Minimal `config.yaml` example:
+
+ charmscaler:
+ juju_api_endpoint: "[API address]:17070"
+ juju_model_uuid: "[uuid]"
+ juju_username: "[username]"
+ juju_password: "[password]"
+
+Deploy and relate the charms:
+
+ juju deploy cs:~elastisys/charmscaler --config=config.yaml
+ juju deploy cs:~chris.macnaughton/influxdb
+ juju deploy telegraf
+ juju deploy [charm]
+
+ juju relate charmscaler:db-api influxdb:api
+ juju relate telegraf:influxdb-api influxdb:api
+ juju relate telegraf:juju-info [charm]:juju-info
+ juju relate charmscaler:juju-info [charm]:juju-info
+
+# How the CharmScaler operates
+
+![CharmScaler flow](https://cdn.rawgit.com/elastisys/layer-charmscaler/master/charmscaler.svg)
+
+The image above illustrates the flow of the CharmScaler when scaling a
+Wordpress application. Scaling decisions made by the CharmScaler depend on a
+load metric -- in this case, the CPU usage of the machines where Wordpress
+instances are deployed.
+
+Metrics are collected by the Telegraf agent which is deployed as a subordinate
+charm attached to the Wordpress application. This means that whenever the
+Wordpress application is scaled out, another Telegraf collector will be
+deployed as well and automatically start pushing new metrics to InfluxDB.
+
+The CharmScaler will ask InfluxDB for new metric datapoints at every poll
+interval (configured using the `metric_poll_interval` option). From these load
+metrics the CharmScaler decides how many units are needed by your application.
+
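+For illustration, the sketch below shows roughly the kind of query the
+CharmScaler issues against InfluxDB: the mean idle CPU reported by Telegraf
+over the last minute. The database, measurement and field names are taken
+from the autoscaler's default configuration; the host, credentials and time
+window are placeholder assumptions for this example.
+
+    # Illustrative only -- not the CharmScaler's actual client code.
+    from influxdb import InfluxDBClient
+
+    client = InfluxDBClient(host="influxdb", port=8086,
+                            username="user", password="secret",
+                            database="telegraf")
+    result = client.query(
+        "SELECT MEAN(usage_idle) FROM cpu WHERE time > now() - 1m")
+    for point in result.get_points():
+        print("mean idle CPU: {:.1f}%".format(point["mean"]))
+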
+In the case of Wordpress it is necessary to distribute the load across all of
+the units using a load balancer. If you haven't already, check out the Juju
+documentation page on
+[charm scaling](https://jujucharms.com/docs/2.0/charms-scaling).
+
+# Configuration explained
+
+The CharmScaler's configuration comprises three main parts:
+`juju`, `scaling` and `alerts`.
+
+#### Juju
+
+The CharmScaler manages the number of units of the scaled charm via the Juju
+controller. To be able to do that it needs to authenticate with the controller.
+Controller authentication credentials are passed to the CharmScaler through
+options prefixed with `juju_`.
+
+Note that in the foreseeable future, passing credentials like these to the
+CharmScaler may no longer be necessary. Instead of requiring you to manually
+type in the authentication details, one could envision Juju giving the charm
+access through relations or something similar.
+
+#### Scaling
+
+The CharmScaler has a number of config options that control the autoscaler's
+behavior. Those options are prefixed with either `scaling_` or `metric_`.
+`metric_` options control the way metrics are fetched and processed while the
+`scaling_` options control when and how the charm units are scaled.
+
+The scaling algorithm available in this edition of the CharmScaler is a
+rule-based one that looks at CPU usage. At each iteration (configured using the
+`scaling_interval` option) the following rules are considered by the autoscaler
+before making a scaling decision:
+
+1. `scaling_cooldown` - Has enough time passed since the last scale-event
+ (scale in or out) occurred?
+2. `scaling_cpu_[max/min]` - Is the CPU usage above/below the set limit?
+3. `scaling_period_[up/down]scale` - Has the CPU usage been above/below
+ `scaling_cpu_[max/min]` for a long enough period of time?
+
+If all three rules above are satisfied, either a scale-out or a scale-in
+occurs and the scaled charm will automatically add or remove a unit, as
+sketched below.
+
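+As a rough sketch (illustrative only, not the autoscaler's actual
+implementation), one scaling iteration could look like this; all names below
+are made up for this example:
+
+    def decide(avg_cpu, secs_above_max, secs_below_min, secs_since_scaling,
+               cfg):
+        # Rule 1: respect the cooldown period after the last scale-event.
+        if secs_since_scaling < cfg["scaling_cooldown"]:
+            return 0
+        # Rules 2 and 3: sustained high load -> add a unit.
+        if (avg_cpu > cfg["scaling_cpu_max"] and
+                secs_above_max >= cfg["scaling_period_upscale"]):
+            return 1
+        # Rules 2 and 3: sustained low load -> remove a unit.
+        if (avg_cpu < cfg["scaling_cpu_min"] and
+                secs_below_min >= cfg["scaling_period_downscale"]):
+            return -1
+        return 0
+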
+Note that configuring the scaling algorithm is a balancing act -- one always
+needs to balance the need to scale "quickly enough" against the need to avoid
+"jumpy behavior". Too frequent scale-ups/scale-downs could have a negative
+impact on overall performance/system stability.
+
+The default behavior adds a new unit when the average CPU usage (over all charm
+units) has exceeded 80% for at least one minute. If you want to make the
+CharmScaler quicker to respond to changes, you can, for example, lower the
+threshold to 60% and the evaluation period to 30 seconds:
+
+ juju config charmscaler scaling_cpu_max=60
+ juju config charmscaler scaling_period_upscale=30
+
+Similarly, the default behavior removes a unit when the average CPU usage
+has been under 20% (`scaling_cpu_min`) for at least two minutes
+(`scaling_period_downscale`). Typically, it is preferable to allow the
+application to be overprovisioned for some time to prevent situations where we
+are too quick to scale down, only to realize that the load dip was temporary
+and that we need to scale back up again. We can, for instance, set
+the evaluation period preceding scale-downs a bit longer (five minutes) via:
+
+ juju config charmscaler scaling_period_downscale=300
+
+Finally, changing the amount of time required between two scaling decisions can
+be done via:
+
+ juju config charmscaler scaling_cooldown=300
+
+This parameter should, however, be kept long enough to give scaling decisions a
+chance to take effect, before a new scaling decision is triggered.
+
+#### Alerts
+
+Lastly, the options with the `alert_` prefix are used to enable CharmScaler
+alerts (these are turned off by default).
+
+Alerts are used to notify the outside world (such as the charm owner) of
+notable scaling events or error conditions. Alerts are, for example, sent
+(with severity-level `ERROR`) if there are problems reaching the Juju
+controller. Alerts are also sent (with severity-level `INFO`) when a scaling
+decision has been made.
+
+This edition of the CharmScaler includes email alerts, which are configured
+by entering the details of the SMTP server through which the autoscaler
+should send the alert email messages.
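+
+For example, alerts could be enabled along these lines (the SMTP host and the
+email addresses below are placeholders):
+
+    juju config charmscaler alert_enabled=true
+    juju config charmscaler alert_smtp_host=smtp.example.com
+    juju config charmscaler alert_sender=charmscaler@example.com
+    juju config charmscaler alert_receivers="ops@example.com admin@example.com"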
+
+## Known limitations
+
+#### When deploying on LXD provider
+
+Due to missing support for the Docker LXC profile in Juju, you need to apply
+it manually.
+
+See: https://bugs.launchpad.net/juju/+bug/1552815
+
+#### InfluxDB co-location
+
+The [Docker layer](https://github.com/juju-solutions/layer-docker) currently
+does not support installing Docker Compose in a virtual environment. Until
+[this is supported](https://github.com/juju-solutions/layer-docker/issues/98)
+InfluxDB cannot be co-located with the CharmScaler charm because of dependency
+conflicts.
+
+-----------------------
+
+By using the Elastisys CharmScaler, you agree to its
+[license](https://elastisys.com/documents/legal/documents/legal/elastisys-software-license/)
+and [privacy statement](https://elastisys.com/documents/legal/privacy-policy/).
diff --git a/actions.yaml b/actions.yaml
new file mode 100644
index 0000000..cd3695d
--- /dev/null
+++ b/actions.yaml
@@ -0,0 +1,13 @@
+smtpserver:
+ description: Run a local SMTP server (for test purposes)
+ params:
+ operation:
+ type: string
+ port:
+ type: integer
+docker-inspect:
+ description: Execute and retrieve output from `docker inspect`
+ params:
+ container:
+ type: string
+ description: Container name or ID
diff --git a/actions/docker-inspect b/actions/docker-inspect
new file mode 100755
index 0000000..6f830f5
--- /dev/null
+++ b/actions/docker-inspect
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+CONTAINER=$(action-get container)
+
+result=$(docker inspect "$CONTAINER" 2>&1)
+if [ "$?" != "0" ]; then
+ action-fail "$result"
+ juju-log -l ERROR "$result"
+else
+ juju-log -l DEBUG "$result"
+ action-set output="$result"
+fi
diff --git a/actions/smtpserver b/actions/smtpserver
new file mode 100755
index 0000000..77b61be
--- /dev/null
+++ b/actions/smtpserver
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+if [ "$(action-get operation)" == "start" ]; then
+ actions/smtpserver.py &
+ pid=$!
+
+ # Give the server some time to start up
+ sleep 3
+
+ # If the server is still running we assume everything works
+ kill -0 $pid > /dev/null 2>&1
+else
+ actions/smtpserver.py
+fi
+
+if [ "$?" != "0" ]; then
+ message="SMTP server error, check the Juju logs"
+ action-fail "$message"
+ juju-log -l ERROR "$message"
+fi
diff --git a/actions/smtpserver.py b/actions/smtpserver.py
new file mode 100755
index 0000000..ad2f140
--- /dev/null
+++ b/actions/smtpserver.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python3.5
+import asyncio
+import logging
+import sys
+
+sys.path.append("lib")
+from charms.layer.basic import activate_venv # noqa: E402
+activate_venv()
+
+from aiosmtpd.controller import Controller # noqa: E402
+from aiosmtpd.handlers import Message # noqa: E402
+from aiozmq import rpc # noqa: E402
+from charmhelpers.core import hookenv # noqa: E402
+import zmq # noqa: E402
+
+
+class Inbox(Message):
+ def __init__(self):
+ self.mail = list()
+ super().__init__()
+
+ @property
+ def count(self):
+ return len(self.mail)
+
+ def handle_message(self, message):
+ self.mail.append(message)
+
+
+class SMTPHandler(rpc.AttrHandler):
+ def __init__(self):
+ self.inbox = Inbox()
+ self.controller = Controller(self.inbox, port=0, ready_timeout=1)
+ self.controller.start()
+
+ @property
+ def port(self):
+ return self.controller.server.sockets[0].getsockname()[1]
+
+ @property
+ def running(self):
+ return self.controller.thread.is_alive()
+
+ @rpc.method
+ def inboxcount(self):
+ return self.inbox.count
+
+ @rpc.method
+ def stop(self):
+ # TODO Call later is currently needed to let the RPC client receive a
+ # response before the server exits.
+ # See: https://github.com/aio-libs/aiozmq/issues/39
+ loop = asyncio.get_event_loop()
+ loop.call_later(0.01, self.controller.stop)
+
+
+class Server:
+ @classmethod
+ async def start(cls):
+ self = cls()
+ self.smtp = SMTPHandler()
+ self.server = await rpc.serve_rpc(self.smtp, bind="tcp://127.0.0.1:*")
+ return self
+
+ @property
+ def port(self):
+ return list(self.server.transport.bindings())[0].split(':')[-1]
+
+ async def wait(self):
+ while self.smtp.running:
+ await asyncio.sleep(0)
+
+ async def stop(self):
+ if self.smtp is not None and self.smtp.running:
+ self.smtp.stop()
+ if self.server is not None:
+ self.server.close()
+ await self.server.wait_closed()
+
+
+class Client:
+ @classmethod
+ async def connect(cls, port):
+ self = cls()
+ address = "tcp://127.0.0.1:{}".format(port)
+ self.client = await rpc.connect_rpc(connect=address, timeout=3)
+ self.client.transport.setsockopt(zmq.LINGER, 0)
+ return self
+
+ def __getattr__(self, name):
+ return getattr(self.client.call, name)
+
+
+async def server():
+ server = await Server.start()
+
+ ports = {
+ "ports.rpc": server.port,
+ "ports.smtp": server.smtp.port
+ }
+ hookenv.log("SMTP server started - ports: {}".format(ports))
+ hookenv.action_set(ports)
+
+ try:
+ await server.wait()
+ except KeyboardInterrupt:
+ pass
+
+ await server.stop()
+
+
+async def client(op):
+ try:
+ port = hookenv.action_get("port")
+ client = await Client.connect(port)
+
+ if op == "inboxcount":
+ count = await client.inboxcount()
+ hookenv.action_set({"count": count})
+ hookenv.log("SMTP server inbox count: {}".format(count))
+ elif op == "stop":
+ await client.stop()
+ hookenv.log("SMTP server successfully stopped")
+ except asyncio.TimeoutError:
+ raise Exception("SMTP server RPC connection timed out")
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG)
+ logging.getLogger("asyncio").setLevel(logging.INFO)
+
+ op = hookenv.action_get("operation")
+
+ loop = asyncio.get_event_loop()
+
+ try:
+ loop.run_until_complete(server() if op == "start" else client(op))
+ except KeyboardInterrupt:
+ pass
+ except Exception as e:
+ msg = str(e)
+ hookenv.action_fail(msg)
+ hookenv.log(msg, level=hookenv.ERROR)
+ finally:
+ for task in asyncio.Task.all_tasks():
+ task.cancel()
+ loop.stop()
+ loop.close()
diff --git a/charmscaler.svg b/charmscaler.svg
new file mode 100644
index 0000000..07ac6e6
--- /dev/null
+++ b/charmscaler.svg
@@ -0,0 +1,505 @@
+
+
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000..9f3d093
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,150 @@
+options:
+ name:
+ type: string
+ default: CharmScaler
+ description: |
+ The name of the service - mainly shows up in the alert e-mails
+
+ Also useful to distinguish between multiple CharmScaler charms
+ juju_api_endpoint:
+ type: string
+ default: null
+ description: |
+ Juju controller API endpoint
+ juju_model_uuid:
+ type: string
+ default: null
+ description: |
+ Juju model UUID
+ juju_username:
+ type: string
+ default: null
+ description: |
+ Juju account username
+ juju_password:
+ type: string
+ default: null
+ description: |
+ Juju account password
+ juju_refresh_interval:
+ type: int
+ default: 5
+ description: |
+ How often the charmscaler should sync against the Juju model.
+ port_autoscaler:
+ type: int
+ default: 8097
+ description: |
+ Port which the Autoscaler API should be served on.
+ metric_poll_interval:
+ type: int
+ default: 10
+ description: |
+ Seconds between polls for new metric values
+ metric_data_settling_interval:
+ type: int
+ default: 15
+ description: |
+ The minimum age (in seconds) of requested data points. When requesting
+ recent aggregate metric data points, there is always a risk of seeing
+ partial/incomplete results before metric values from all sources have
+ been registered. The value to set for this field depends on the reporting
+ frequency of monitoring agents, but as a general rule-of-thumb, this
+ value can be set to about 1.5 times the reporting interval of the
+ monitoring agents (for example, with agents reporting every 10 seconds, 15
+ is a reasonable value).
+ scaling_units_min:
+ type: int
+ default: 1
+ description: |
+ Minimum amount of units to keep in pool
+ scaling_units_max:
+ type: int
+ default: 4
+ description: |
+ Maximum amount of units to keep in pool
+ scaling_interval:
+ type: int
+ default: 10
+ description: |
+ Seconds between each scaling decision
+ scaling_cpu_max:
+ type: int
+ default: 80
+ description: |
+ CPU usage threshold at which the number of units should be scaled up.
+ scaling_cpu_min:
+ type: int
+ default: 20
+ description: |
+ CPU threshold where the load is considered low enough to scale down the
+ number of units.
+ scaling_period_upscale:
+ type: int
+ default: 60
+ description: |
+ Number of seconds that the CPU usage needs to be higher than the
+ threshold before scaling up.
+ scaling_period_downscale:
+ type: int
+ default: 120
+ description: |
+ Number of seconds that the CPU usage needs to be lower than the threshold
+ before scaling down.
+ scaling_cooldown:
+ type: int
+ default: 300
+ description: |
+ Time (in seconds) before making another scaling decision from the time of
+ the last up- or downscale. This is useful to prevent extra resizes due to
+ slow teardowns or, in particular, slow start-ups.
+ alert_enabled:
+ type: boolean
+ default: false
+ description: |
+ Toggle e-mail alerts on/off
+ alert_smtp_host:
+ type: string
+ default: null
+ description: SMTP hostname
+ alert_smtp_port:
+ type: int
+ default: 25
+ description: SMTP port
+ alert_smtp_ssl:
+ type: boolean
+ default: false
+ description: |
+ Use SSL when connecting to SMTP host
+ alert_smtp_username:
+ type: string
+ default: null
+ description: |
+ Username to auth with the SMTP server
+ alert_smtp_password:
+ type: string
+ default: null
+ description: |
+ Password to auth with the SMTP server
+ alert_sender:
+ type: string
+ default: null
+ description: |
+ E-mail address that alert mails should be sent from
+ alert_receivers:
+ type: string
+ default: null
+ description: |
+ Space-separated list of e-mail addresses that should receive alerts
+ alert_levels:
+ # TODO: Fix @ enum-support
+ type: string
+ default: INFO NOTICE WARN ERROR FATAL
+ description: |
+ Alert levels that should trigger alert mails to be sent out
+ charmpool_url:
+ type: string
+ default: http://charmpool:80
+ description: |
+ URL to the Charmpool component. By default, both the autoscaler and the
+ pool run in the same Docker network and reach each other by their local
+ hostnames.
diff --git a/copyright b/copyright
new file mode 100644
index 0000000..280bb29
--- /dev/null
+++ b/copyright
@@ -0,0 +1,13 @@
+Copyright 2016 Elastisys AB
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/files/docker-compose-base.yml b/files/docker-compose-base.yml
new file mode 100644
index 0000000..44777e4
--- /dev/null
+++ b/files/docker-compose-base.yml
@@ -0,0 +1,9 @@
+version: "2"
+
+services:
+ _base:
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "10m"
+ max-file: "1"
diff --git a/icon.svg b/icon.svg
new file mode 100644
index 0000000..b153909
--- /dev/null
+++ b/icon.svg
@@ -0,0 +1,418 @@
+
+
+
+
diff --git a/layer.yaml b/layer.yaml
new file mode 100644
index 0000000..f694fa1
--- /dev/null
+++ b/layer.yaml
@@ -0,0 +1,10 @@
+repo: https://github.com/elastisys/layer-charmscaler
+includes:
+ - layer:docker
+ - layer:metrics
+ - interface:influxdb-api
+ - interface:juju-info
+options:
+ basic:
+ use_venv: true
+ include_system_packages: true
diff --git a/lib/reactive/__init__.py b/lib/reactive/__init__.py
new file mode 100644
index 0000000..3a95535
--- /dev/null
+++ b/lib/reactive/__init__.py
@@ -0,0 +1,13 @@
+# work-around until fix is released in charms.reactive
+# import reactive.X instead of relative imports
+import os.path
+from pkgutil import extend_path
+import sys
+
+
+charm_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ os.pardir, os.pardir))
+if charm_dir not in sys.path:
+ sys.path.append(charm_dir)
+
+__path__ = extend_path(__path__, __name__)
diff --git a/metadata.yaml b/metadata.yaml
new file mode 100644
index 0000000..67427c5
--- /dev/null
+++ b/metadata.yaml
@@ -0,0 +1,27 @@
+name: charmscaler
+summary: The Elastisys CharmScaler is an autoscaler for Juju applications
+maintainers:
+ - Elastisys
+description: |
+ The CharmScaler is an autoscaler for Juju applications. Based on Elastisys'
+ autoscaling engine, it rightsizes your application deployments using
+ sophisticated auto-scaling algorithms to ensure that the application runs
+ cost-efficiently and is responsive at all times, even in the face of sudden
+ load spikes. At times of high anticipated load your charm is reinforced with
+ additional units -- units that are automatically decommissioned as the
+ pressure on your application goes down.
+tags:
+ - ops
+ - performance
+series:
+ - xenial
+subordinate: false
+requires:
+ scalable-charm:
+ interface: juju-info
+ db-api:
+ interface: influxdb-api
+resources:
+ docker-images:
+ type: file
+ filename: docker-images.tar
diff --git a/reactive/autoscaler.py b/reactive/autoscaler.py
new file mode 100644
index 0000000..cb87293
--- /dev/null
+++ b/reactive/autoscaler.py
@@ -0,0 +1,179 @@
+from requests.exceptions import HTTPError
+
+from charmhelpers.core.hookenv import local_unit, log
+
+from reactive.component import ConfigComponent, DockerComponent
+from reactive.config import Config, required
+
+
+class Autoscaler(DockerComponent, ConfigComponent):
+ """
+ This class includes the specific instructions and manages the necessary
+ lifecycle operations of the Autoscaler component.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :param tag: Docker image tag
+ :type tag: str
+ """
+ def __init__(self, cfg, tag):
+ self.unit_id = local_unit().replace('/', '-')
+ super().__init__("autoscaler", cfg["port_autoscaler"], {
+ "initialize": "autoscaler/instances",
+ "status": "autoscaler/instances/{}/status".format(self.unit_id),
+ "configure": "autoscaler/instances/{}/config".format(self.unit_id),
+ "start": "autoscaler/instances/{}/start".format(self.unit_id),
+ "stop": "autoscaler/instances/{}/stop".format(self.unit_id)
+ }, tag=tag)
+
+ def compose(self, *args, **kwargs):
+ """
+ Generates and runs the Autoscaler's Docker compose file.
+
+ :raises: component.DockerComponentUnhealthy
+ """
+ self.compose_config.extend(lambda: {"port": self.port})
+ super().compose()
+
+ def initialize(self):
+ """
+ Render a blueprint configuration and launch an instance using said
+ blueprint.
+
+ :raises: requests.exceptions.RequestException
+ """
+ blueprint_config = Config("blueprint.json", self.name)
+
+ blueprint_config.extend(lambda: {
+ "id": self.unit_id
+ })
+
+ blueprint_config.render()
+
+ with blueprint_config.open() as config_file:
+ try:
+ self.send_request("initialize", method="POST",
+ headers={"content-type": "application/json"},
+ data=config_file, data_type="file")
+ except HTTPError as err:
+ def is_ready():
+ """
+ Checks if the Autoscaler server is ready.
+ """
+ try:
+ self.send_request("status")
+ except HTTPError as err:
+ log("Autoscaler status request error: {}".format(err))
+ return False
+ return True
+
+ # Check if the Autoscaler instance already has been created
+ if err.response.status_code != 400 or not is_ready():
+ raise err
+
+ def configure(self, cfg, influxdb):
+ """
+ Generate the Autoscaler-specific config values and configure the
+ instance.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :param influxdb: InfluxDB information
+ :type influxdb: dict
+ :raises: config.ConfigurationException
+ :raises: requests.exceptions.RequestException
+ """
+ self.config.extend(autoscaler_config, cfg, influxdb)
+ super().configure()
+
+ def start(self):
+ """
+ Start the Autoscaler.
+
+ :raises: requests.exceptions.RequestException
+ """
+ self.send_request("start", method="POST")
+
+ def stop(self):
+ """
+ Stop the Autoscaler.
+
+ :raises: requests.exceptions.RequestException
+ """
+ self.send_request("stop", method="POST")
+
+
+def alerts_config(cfg):
+ """
+ Generates the alerts config dict.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :returns: dict with alert configuration options
+ """
+ if not required(cfg, "alert_enabled"):
+ return None
+
+ return {
+ "recipients": required(cfg, "alert_receivers").split(),
+ "levels": required(cfg, "alert_levels").split(),
+ "sender": required(cfg, "alert_sender"),
+ "smtp": {
+ "host": required(cfg, "alert_smtp_host"),
+ "port": required(cfg, "alert_smtp_port"),
+ "ssl": required(cfg, "alert_smtp_ssl"),
+ "username": required(cfg, "alert_smtp_username"),
+ "password": required(cfg, "alert_smtp_password")
+ }
+ }
+
+
+def influxdb_config(influxdb):
+ """
+ Returns the InfluxDB relation data as dict to use in config.
+
+ :param influxdb: InfluxDB relation data object
+ :type influxdb: InfluxdbClient
+ :returns: dict with InfluxDB configuration options
+ """
+ return {
+ "host": influxdb.hostname(),
+ "port": influxdb.port(),
+ "username": influxdb.user(),
+ "password": influxdb.password(),
+ }
+
+
+def autoscaler_config(cfg, influxdb):
+ """
+ Generates the Autoscaler's config dict.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :param influxdb: InfluxDB relation data object
+ :type influxdb: InfluxdbClient
+ :returns: dict with the Autoscaler's configuration
+ """
+ return {
+ "name": "{} Autoscaler".format(required(cfg, "name")),
+ "alert": alerts_config(cfg),
+ "influxdb": influxdb_config(influxdb),
+ "metric": {
+ "poll_interval": required(cfg, "metric_poll_interval"),
+ "data_settling_interval":
+ required(cfg, "metric_data_settling_interval")
+ },
+ "scaling": {
+ "min_units": required(cfg, "scaling_units_min"),
+ "max_units": required(cfg, "scaling_units_max"),
+ "interval": required(cfg, "scaling_interval"),
+ # Since we're currently looking at the idle CPU value, the
+ # usage thresholds are in reverse.
+ "threshold_upscale": (100 - required(cfg, "scaling_cpu_max")),
+ "threshold_downscale": (100 - required(cfg, "scaling_cpu_min")),
+ "period_upscale": required(cfg, "scaling_period_upscale"),
+ "period_downscale": required(cfg, "scaling_period_downscale"),
+ "cooldown": required(cfg, "scaling_cooldown")
+ },
+ "cloudpool": {
+ "url": required(cfg, "charmpool_url")
+ }
+ }
diff --git a/reactive/charmpool.py b/reactive/charmpool.py
new file mode 100644
index 0000000..0828b8a
--- /dev/null
+++ b/reactive/charmpool.py
@@ -0,0 +1,44 @@
+from reactive.component import DockerComponent
+from reactive.config import required
+
+
+class Charmpool(DockerComponent):
+ """
+ The charmpool component.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :param tag: Docker image tag
+ :type tag: str
+ """
+ def __init__(self, cfg, tag):
+ super().__init__("charmpool", tag=tag)
+
+ def compose(self, cfg, application):
+ """
+ Generates and runs the Charmpool's Docker compose file.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :param application: The name of the application that is being autoscaled
+ :type application: str
+ :raises: component.DockerComponentUnhealthy
+ """
+ self.compose_config.extend(compose_config, cfg, application)
+ super().compose()
+
+
+def compose_config(cfg, application):
+ """
+ Generates Charmpool config dict.
+
+ :param cfg: The charm configuration
+ :type cfg: dict
+ :param application: The name of the application that is being autoscaled
+ :type application: str
+ :returns: dict with Charmpool's Docker compose config
+ """
+ config = {
+ "application": application
+ }
+ for option in ("api_endpoint", "model_uuid", "username",
+ "password", "refresh_interval"):
+ key = "juju_{}".format(option)
+ config[key] = required(cfg, key)
+ return config
diff --git a/reactive/charmscaler.py b/reactive/charmscaler.py
new file mode 100644
index 0000000..8f426ae
--- /dev/null
+++ b/reactive/charmscaler.py
@@ -0,0 +1,251 @@
+import os
+
+from requests.exceptions import HTTPError
+
+from charmhelpers.core.hookenv import (application_version_set, config, log,
+ remote_service_name, resource_get,
+ status_set, ERROR)
+from charms.docker import Docker
+from charms.reactive import hook, remove_state, set_state, when, when_not
+
+from reactive.autoscaler import Autoscaler
+from reactive.charmpool import Charmpool
+from reactive.component import (ConfigComponent, DockerComponent,
+ DockerComponentUnhealthy)
+from reactive.config import ConfigurationException
+
+AUTOSCALER_VERSION = "5.0.1"
+CHARMPOOL_VERSION = "0.0.2"
+
+cfg = config()
+
+components = [
+ Charmpool(cfg, tag=CHARMPOOL_VERSION),
+ Autoscaler(cfg, tag=AUTOSCALER_VERSION)
+]
+
+
+def _execute(method, *args, classinfo=None, pre_healthcheck=True, **kwargs):
+ """
+ Helper function to execute the same component-method on all of the charm's
+ components.
+
+ :param method: Name of the method to run
+ :type method: str
+ :param classinfo: Class of which a component must be an instance (or an
+ instance of a subclass) for the method to be called on it.
+ :type classinfo: type
+ :param pre_healthcheck: If True, a Docker healthcheck will be executed on
+ the components before continuing with the normal
+ operation.
+ :type pre_healthcheck: bool
+ :returns: True if no errors occurred, else False
+ """
+ try:
+ if pre_healthcheck:
+ _execute("healthcheck", classinfo=DockerComponent,
+ pre_healthcheck=False)
+
+ for component in components:
+ if not classinfo or isinstance(component, classinfo):
+ getattr(component, method)(*args, **kwargs)
+
+ return True
+ except HTTPError as err:
+ try:
+ error_msg = err.response.json()["message"]
+ except Exception:
+ error_msg = str(err)
+ msg = "HTTP error while executing '{}': {}".format(method, error_msg)
+ except ConfigurationException as err:
+ msg = "Error while configuring {}: {}".format(err.config.filename, err)
+ except DockerComponentUnhealthy as err:
+ # An unhealthy component should be seen as not composed
+ _reset()
+ msg = str(err)
+
+ status_set("blocked", msg)
+ log(msg, level=ERROR)
+ return False
+
+
+@when_not("docker.available")
+def wait_for_docker():
+ """
+ Wait for Docker to get installed and start up.
+ """
+ status_set("maintenance", "Installing Docker")
+
+
+def _prepare_volume_directories():
+ """
+ Create directories that are to be mounted as Docker volumes.
+ """
+ # state storage for containers
+ if not os.path.exists('/var/lib/elastisys'):
+ os.makedirs('/var/lib/elastisys')
+ # container log output
+ if not os.path.exists('/var/log/elastisys'):
+ os.makedirs('/var/log/elastisys')
+
+
+@when("docker.available")
+@when_not("charmscaler.installed")
+def install():
+ """
+ Prepare and install the CharmScaler components.
+ """
+ status_set("maintenance", "Installing")
+
+ application_version_set("{}, {}".format(AUTOSCALER_VERSION,
+ CHARMPOOL_VERSION))
+
+ _prepare_volume_directories()
+
+ docker = Docker()
+
+ msg = "Loading Docker images from docker-images resource"
+ log(msg)
+ status_set("maintenance", msg)
+ resource_name = "docker-images"
+ path = resource_get(resource_name)
+ if not path:
+ msg = "Missing resource: {}".format(resource_name)
+ log(msg, level=ERROR)
+ status_set("blocked", msg)
+ return
+
+ docker.load(path)
+
+ set_state("charmscaler.installed")
+
+
+def _reset():
+ """
+ Reset the CharmScaler to a pre-composed state.
+ """
+ remove_state("charmscaler.composed")
+ remove_state("charmscaler.configured")
+ remove_state("charmscaler.started")
+ remove_state("charmscaler.available")
+
+
+@hook("upgrade-charm")
+def reinstall():
+ """
+ Reinstall the CharmScaler on the upgrade-charm hook.
+ """
+ remove_state("charmscaler.installed")
+ _reset()
+ install()
+
+
+@when("charmscaler.installed")
+@when_not("scalable-charm.available")
+def wait_for_scalable_charm():
+ """
+ Wait for a juju-info relation to a charm that is going to be autoscaled.
+ """
+ status_set("blocked", "Waiting for relation to scalable charm")
+
+
+@when("charmscaler.installed")
+@when("scalable-charm.available")
+def compose(scale_relation):
+ """
+ Start all of the Docker components. If the Compose manifest has changed the
+ affected Docker containers will be recreated. This is done at every run.
+
+ :param scale_relation: Relation object for the charm that is going to be
+ autoscaled.
+ :type scale_relation: JujuInfoClient
+ """
+
+ # Reset to re-check configuration and return to available state.
+ _reset()
+
+ class ComposeException(Exception):
+ pass
+
+ try:
+ scale_relation_ids = scale_relation.conversation().relation_ids
+
+ if len(scale_relation_ids) > 1:
+ raise ComposeException("Cannot scale more than one application at "
+ "the same time. Deploy more CharmScalers.")
+
+ # This could happen if the state hasn't been updated yet but the
+ # relation is removed.
+ if len(scale_relation_ids) < 1:
+ raise ComposeException("Scalable charm relation was lost")
+
+ application = remote_service_name(scale_relation_ids[0])
+
+ if _execute("compose", cfg, application, classinfo=DockerComponent,
+ pre_healthcheck=False):
+ set_state("charmscaler.composed")
+ return
+ except ComposeException as err:
+ msg = "Error while composing: {}".format(err)
+
+ status_set("blocked", msg)
+ log(msg, level=ERROR)
+
+
+@when("charmscaler.composed")
+@when_not("charmscaler.initialized")
+def initialize():
+ """
+ Initialize the autoscaler.
+ """
+ if _execute("initialize", classinfo=Autoscaler):
+ set_state("charmscaler.initialized")
+
+
+@when("charmscaler.initialized")
+@when_not("db-api.available")
+def waiting_for_influxdb():
+ """
+ Wait for relation to InfluxDB charm.
+ """
+ status_set("blocked", "Waiting for InfluxDB relation")
+
+
+@when("charmscaler.composed")
+@when("charmscaler.initialized")
+@when("db-api.available")
+@when_not("charmscaler.configured")
+def configure(influxdb):
+ """
+ Configure all of the charm config components. This is done at every run,
+ however, if the config is unchanged nothing happens.
+ """
+ if _execute("configure", cfg, influxdb, classinfo=ConfigComponent):
+ set_state("charmscaler.configured")
+
+
+@when("charmscaler.configured")
+@when_not("charmscaler.started")
+def start():
+ """
+ Start the autoscaler.
+ """
+ if _execute("start", classinfo=Autoscaler):
+ set_state("charmscaler.started")
+
+
+@hook("stop")
+def stop():
+ """
+ Stop the autoscaler.
+ """
+ _execute("stop", classinfo=Autoscaler)
+ _reset()
+
+
+@when("charmscaler.started")
+def available():
+ """
+ We're good to go!
+ """
+ status_set("active", "Available")
+ set_state("charmscaler.available")
diff --git a/reactive/component.py b/reactive/component.py
new file mode 100644
index 0000000..fff3528
--- /dev/null
+++ b/reactive/component.py
@@ -0,0 +1,232 @@
+import os
+
+import backoff
+from requests import Session
+from requests.exceptions import RequestException
+
+from charmhelpers.core.hookenv import log, status_set, DEBUG
+from charms.docker import Compose, Docker
+
+from reactive.config import Config
+from reactive.helpers import log_to_juju
+
+# Maximum number of seconds to wait for container to become healthy.
+# This is mainly important during startup because that can take a while.
+HEALTH_RETRY_LIMIT = 60
+
+# Number of retries for a failed HTTP request.
+HTTP_RETRY_LIMIT = 5
+
+log_to_juju("backoff")
+
+
+class Component:
+ """
+ Base class for all the different components that the charm is managing.
+
+ :param name: Name of the component
+ :type name: str
+ """
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self):
+ return self.name
+
+
+class DockerComponentUnhealthy(Exception):
+ def __init__(self, component):
+ self.component = component
+ super().__init__("Unhealthy component: {} - "
+ "Check the Docker container logs".format(component))
+
+
+class DockerComponent(Component):
+ """
+ A Docker component is used for software which runs as Docker containers.
+ With it you can generate a Docker Compose yaml-manifest and then start
+ the Docker Compose services.
+
+ :param name: Name of the component
+ :type name: str
+ :param tag: Docker image tag
+ :type tag: str
+ :var compose_config: Every Docker component has a :class:`Config` object
+ which is created with the name docker-compose.yml
+ under the folder path named after the component's
+ ':paramref:`name`' parameter.
+ :vartype compose_config: :class:`Config`
+ """
+ def __init__(self, name, *args, tag="latest"):
+ super().__init__(name, *args)
+ self.compose_config = Config("docker-compose.yml", name)
+ self.compose_config.extend(lambda: {"tag": tag})
+
+ @backoff.on_exception(backoff.constant, DockerComponentUnhealthy,
+ max_tries=HEALTH_RETRY_LIMIT, jitter=None)
+ def healthcheck(self):
+ """
+ Healthcheck is used to poll the Docker health state. A health test
+ command needs to be specified in the Compose manifest or in the
+ Dockerfile.
+
+ If the Docker container is not healthy, a
+ :class:`DockerComponentUnhealthy` is raised. Otherwise, the container is
+ currently considered healthy by the test command.
+
+ :raises: DockerComponentUnhealthy
+ """
+ log("Healthchecking {}".format(self.name), level=DEBUG)
+ if not Docker().healthcheck(self.name):
+ raise DockerComponentUnhealthy(self)
+
+ def _up(self):
+ compose = Compose(os.path.dirname(str(self.compose_config)))
+ compose.up()
+
+ def compose(self):
+ """
+ Generate, render and (re)start the component's Docker Compose services.
+ """
+ # TODO Would be nice to have support for multiple compose files and/or
+ # the project flag in charms.docker.
+ #
+ # Dotfiles are ignored when creating a charm archive to push to the
+ # charmstore. We need to generate the .env files during runtime.
+ compose_env = Config("dotenv", "common", "{}/.env".format(self.name))
+ if not compose_env.exists():
+ compose_env.extend(lambda: {"name": "charmscaler"})
+ compose_env.render()
+
+ # The upgrade-charm hook requires files to be rendered again - make
+ # sure the compose files exists.
+ if (not self.compose_config.exists() or
+ self.compose_config.has_changed()):
+ msg = "Rendering Docker Compose manifest for {}".format(self)
+ status_set("maintenance", msg)
+ log(msg)
+
+ self.compose_config.render()
+
+ self.compose_config.commit()
+
+ self._up()
+
+ # Healthcheck after the Docker containers have been (re)started
+ self.healthcheck()
+
+
+class HTTPComponent(Component):
+ """
+ Components with a HTTP REST API.
+
+ :param name: Name of the component
+ :type name: str
+ :param port: The port the component is supposed to listen on
+ :type port: int
+ :param paths: The REST API URL paths. The paths are dependent on which
+ operations the component is capable of.
+ :type paths: dict
+ """
+ def __init__(self, name, port, paths):
+ super().__init__(name)
+ self.port = port
+ self.paths = paths
+
+ self._session = Session()
+
+ def _get_url(self, path):
+ try:
+ return "http://localhost:{port}/{path}".format(
+ port=self.port,
+ path=self.paths[path]
+ )
+ except KeyError:
+ msg = "Missing REST API path '{}' for {}".format(path, self.name)
+ raise NotImplementedError(msg)
+
+ @backoff.on_exception(backoff.expo, RequestException,
+ max_tries=HTTP_RETRY_LIMIT)
+ def send_request(self, path, method="GET", headers=None, data=None,
+ data_type="json"):
+ """
+ Send requests to the REST API of the component.
+
+ :param path: REST API path
+ :param method: Request method to use. Default: GET
+ :param headers: Request headers
+ :param data: Request data
+ :param data_type: Request data type. Default: json
+ :returns: requests.Response
+ :raises: requests.exceptions.RequestException
+ """
+ url = self._get_url(path)
+
+ if method == "GET":
+ response = self._session.get(url, headers=headers)
+ elif method == "POST":
+ if data_type == "json":
+ response = self._session.post(url, headers=headers, json=data)
+ elif data_type == "file":
+ # Start from the beginning if this has already been read, for
+ # example during a retry
+ data.seek(0)
+ response = self._session.post(url, headers=headers, data=data)
+ else:
+ raise Exception("Unhandeled data type: {}".format(data_type))
+ else:
+ raise Exception("Unhandeled REST API verb: {}".format(method))
+
+ log("Request URL: {}".format(url), level=DEBUG)
+ log("Response status: {}".format(response.status_code), level=DEBUG)
+ log("Response data: {}".format(response.text), level=DEBUG)
+
+ response.raise_for_status()
+
+ return response
+
+
+class ConfigComponent(HTTPComponent):
+ """
+ A config component has the capability of generating and rendering a
+ configuration file from a specific template file.
+
+ :param name: Name of the component
+ :type name: str
+ :param port: The port the component is supposed to listen on
+ :type port: int
+ :param paths: The REST API URL paths. The paths are dependent on which
+ operations the component is capable of.
+ :type paths: dict
+ :var config: Every config component has a :class:`Config` object which is
+ created with the name config.json under the folder path named
+ after the component's ':paramref:`name`' parameter.
+ :vartype config: :class:`Config`
+ """
+ def __init__(self, name, port, paths):
+ super().__init__(name, port, paths)
+ self.config = Config("config.json", name)
+
+ def configure(self):
+ """
+ Generate, render and push a new config to the component.
+
+ If the config is unchanged nothing happens.
+
+ On errors, :class:`config.ConfigurationException` is raised.
+
+ :raises: config.ConfigurationException,
+ requests.exceptions.RequestException
+ """
+ if not self.config.exists() or self.config.has_changed():
+ msg = "Configuring {}".format(self)
+ status_set("maintenance", msg)
+ log(msg)
+
+ self.config.render()
+
+ with self.config.open() as config_file:
+ self.send_request("configure", method="POST",
+ headers={"content-type": "application/json"},
+ data=config_file, data_type="file")
+ self.config.commit()
diff --git a/reactive/config.py b/reactive/config.py
new file mode 100644
index 0000000..50785ff
--- /dev/null
+++ b/reactive/config.py
@@ -0,0 +1,116 @@
+import os
+
+from charmhelpers.core.hookenv import charm_dir
+from charmhelpers.core.templating import render
+
+from reactive.helpers import data_changed, data_commit
+
+CONFIG_PATH = "files"
+
+
+class ConfigurationRequiredException(Exception):
+ def __init__(self, key):
+ super().__init__(key)
+
+
+def required(cfg, key):
+ """
+ Use this function to make the extend function raise an error if the
+ specified configuration option is empty.
+
+ :param cfg: The charm config
+ :type cfg: dict
+ :param key: The specific charm config option
+ :type key: str
+ """
+ value = cfg[key]
+ # False and 0 are valid values for a required option
+ if not value and value not in (False, 0):
+ raise ConfigurationRequiredException(key)
+ return value
+
+
+class ConfigurationException(Exception):
+ """
+ Exception raised for errors while generating, rendering or configuring a
+ component.
+
+ :param config: The :class:`Config` this exception refers to
+ :type config: :class:`Config`
+ :param message: Exception message
+ :type message: str
+ """
+ def __init__(self, config, message):
+ super().__init__(message)
+ self.config = config
+
+
+class Config:
+ """
+ The purpose of this class is to handle all config related operations. This
+ includes generating the config data and rendering the config file from a
+ template.
+
+ :param filename: Filename of the config template file
+ :type filename: str
+ :param path: Directory of the config template, named after the component
+ :type path: str
+ :param target: Optional path of the rendered config file, relative to the
+ charm's files directory (defaults to the template path)
+ :type target: str
+ """
+ def __init__(self, filename, path, target=None):
+ self._config = {}
+
+ self.filename = filename
+ self.path = path
+ self.template = os.path.join(self.path, self.filename)
+ if target:
+ self.target = os.path.join(charm_dir(), CONFIG_PATH, target)
+ else:
+ self.target = os.path.join(charm_dir(), CONFIG_PATH, self.template)
+
+ self.unitdata_key = "charmscaler.config.{}.{}".format(self.path,
+ self.filename)
+
+ def __str__(self):
+ return self.target
+
+ def extend(self, func, *args):
+ """
+ Add more configuration data through a generator function which creates
+ a dictionary with the config values in place.
+
+ :param func: The config generator function
+ :type func: function
+ :param *args: Extra arguments to the generator function
+ """
+ try:
+ self._config.update(func(*args))
+ except ConfigurationRequiredException as err:
+ msg = "Config option '{}' cannot be empty".format(err)
+ raise ConfigurationException(self, msg)
+
+ def has_changed(self):
+ """
+ Check if this config has changed in the unit data store.
+ """
+ return data_changed(self.unitdata_key, self._config)
+
+ def commit(self):
+ """
+ Commit the current config to the unit data store.
+ """
+ data_commit(self.unitdata_key, self._config)
+
+ def render(self):
+ """
+ Render the configuration data to the configuration file located at the
+ `target` path.
+ """
+ render(self.template, self.target, self._config)
+
+ def open(self, mode='rb'):
+ return open(self.target, mode)
+
+ def exists(self):
+ return os.path.isfile(self.target)
diff --git a/reactive/helpers.py b/reactive/helpers.py
new file mode 100644
index 0000000..e89b6fa
--- /dev/null
+++ b/reactive/helpers.py
@@ -0,0 +1,46 @@
+import hashlib
+import json
+import logging
+
+from charmhelpers.core import unitdata
+from charmhelpers.core.hookenv import log as juju_log
+
+
+def data_changed(data_id, data, hash_type='md5'):
+ """
+ Similar to the data_changed function in charms.reactive.helpers but without
+ the kv().set step. Useful when you don't want the stored hash to be updated
+ until later on, for example to make sure the data is only committed once a
+ task has finished successfully.
+ """
+ key = 'reactive.data_changed.%s' % data_id
+ alg = getattr(hashlib, hash_type)
+ serialized = json.dumps(data, sort_keys=True).encode('utf8')
+ old_hash = unitdata.kv().get(key)
+ new_hash = alg(serialized).hexdigest()
+ return old_hash != new_hash
+
+
+def data_commit(data_id, data, hash_type='md5'):
+ """
+ Used in conjunction with data_changed() to update the changes in the
+ datastore.
+ """
+ key = 'reactive.data_changed.%s' % data_id
+ alg = getattr(hashlib, hash_type)
+ serialized = json.dumps(data, sort_keys=True).encode('utf8')
+ new_hash = alg(serialized).hexdigest()
+ unitdata.kv().set(key, new_hash)
+
+
+def log_to_juju(name):
+ """
+ Forward logging to the Juju log
+ """
+ class JujuHandler(logging.Handler):
+ def emit(self, record):
+ log_entry = self.format(record)
+ juju_log(log_entry, level=record.levelname)
+ log = logging.getLogger(name)
+ log.setLevel(logging.DEBUG)
+ log.addHandler(JujuHandler())
diff --git a/templates/autoscaler/blueprint.json b/templates/autoscaler/blueprint.json
new file mode 100644
index 0000000..7a1d0cc
--- /dev/null
+++ b/templates/autoscaler/blueprint.json
@@ -0,0 +1,3 @@
+{
+ "id": "{{ id }}"
+}
diff --git a/templates/autoscaler/config.json b/templates/autoscaler/config.json
new file mode 100644
index 0000000..319c2ee
--- /dev/null
+++ b/templates/autoscaler/config.json
@@ -0,0 +1,105 @@
+{
+ {% if alert %}
+ "alerter": {
+ {% include "common/alert-config.json" %}
+ },
+ {% endif %}
+ "monitoringSubsystem": {
+ "metricStreamer": {
+ "type": "InfluxdbMetricStreamer",
+ "config": {
+ "host": "{{ influxdb.host }}",
+ "port": {{ influxdb.port }},
+ "security": {
+ "auth": {
+ "username": "{{ influxdb.username }}",
+ "password": "{{ influxdb.password }}"
+ }
+ },
+ "pollInterval": {
+ "time": {{ metric.poll_interval }},
+ "unit": "seconds"
+ },
+ "metricStreams": [
+ {
+ "id": "average.cpu.percent.stream",
+ "metricName": "cpu.percent",
+ "database": "telegraf",
+ "measurement": "cpu",
+ "fieldKey": "usage_idle",
+ "function": "MEAN",
+ "downsample": { "time": 10, "unit": "seconds" },
+ "dataSettlingTime": {
+ "time": {{ metric.data_settling_interval }},
+ "unit": "seconds"
+ }
+ }
+ ]
+ }
+ },
+ "systemHistorian": {
+ "type": "InfluxdbSystemHistorian",
+ "config": {
+ "host": "{{ influxdb.host }}",
+ "port": {{ influxdb.port }},
+ "security": {
+ "auth": {
+ "username": "{{ influxdb.username }}",
+ "password": "{{ influxdb.password }}"
+ }
+ },
+ "database": "statsdb",
+ "reportingInterval": { "time": 10, "unit": "seconds" }
+ }
+ }
+ },
+ "metronome": {
+ "horizon": { "time": 1, "unit": "seconds" },
+ "interval": { "time": {{ scaling.interval }}, "unit": "seconds" }
+ },
+ "predictionSubsystem": {
+ "predictors": [{
+ "id": "p1",
+ "type": "RuleBasedPredictor",
+ "metricStream": "average.cpu.percent.stream",
+ "parameters": {
+ "cooldownPeriod": {
+ "time": {{ scaling.cooldown }},
+ "unit": "seconds"
+ },
+ "scalingRules": [
+ {
+ "condition": "BELOW",
+ "threshold": {{ scaling.threshold_upscale }},
+ "period": {
+ "time": {{ scaling.period_upscale }},
+ "unit": "seconds"
+ },
+ "resize": "1",
+ "unit": "INSTANCES"
+ },
+ {
+ "condition": "ABOVE",
+ "threshold": {{ scaling.threshold_downscale }},
+ "period": {
+ "time": {{ scaling.period_downscale }},
+ "unit": "seconds"
+ },
+ "resize": "-1",
+ "unit": "INSTANCES"
+ }
+ ]
+ }
+ }],
+ "capacityLimits": [{
+ "id": "baseline",
+ "rank": 1,
+ "schedule": "* * * * * ? *",
+ "min": {{ scaling.min_units }},
+ "max": {{ scaling.max_units }}
+ }]
+ },
+ "cloudPool": {
+ "cloudPoolUrl": "{{ cloudpool.url }}"
+ }
+}
diff --git a/templates/autoscaler/docker-compose.yml b/templates/autoscaler/docker-compose.yml
new file mode 100644
index 0000000..e17c287
--- /dev/null
+++ b/templates/autoscaler/docker-compose.yml
@@ -0,0 +1,19 @@
+version: "2"
+
+services:
+ autoscaler:
+ container_name: "autoscaler"
+ extends:
+ file: "../docker-compose-base.yml"
+ service: "_base"
+ image: "elastisys/charmscaler-restricted:{{ tag }}"
+ volumes:
+ - "/var/log/elastisys:/var/log/elastisys"
+ - "/var/lib/elastisys:/var/lib/elastisys"
+ environment:
+ - "HTTP_PORT=80"
+ - "JVM_OPTS=-Xmx128m"
+ - "STORAGE_DIR=/var/lib/elastisys/autoscaler"
+ - "LOG_DIR=/var/log/elastisys/autoscaler"
+ ports:
+ - "{{ port }}:80"
diff --git a/templates/charmpool/docker-compose.yml b/templates/charmpool/docker-compose.yml
new file mode 100644
index 0000000..96a6723
--- /dev/null
+++ b/templates/charmpool/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "2"
+
+services:
+ charmpool:
+ container_name: "charmpool"
+ extends:
+ file: "../docker-compose-base.yml"
+ service: "_base"
+ image: "elastisys/charmpool:{{ tag }}"
+ environment:
+ - "API_ENDPOINT={{ juju_api_endpoint }}"
+ - "MODEL_UUID={{ juju_model_uuid }}"
+ - "USERNAME={{ juju_username }}"
+ - "PASSWORD={{ juju_password }}"
+ - "APPLICATION={{ application }}"
+ - "PORT=80"
+ - "REFRESH_INTERVAL={{ juju_refresh_interval }}"
diff --git a/templates/common/alert-config.json b/templates/common/alert-config.json
new file mode 100644
index 0000000..1a27c3a
--- /dev/null
+++ b/templates/common/alert-config.json
@@ -0,0 +1,24 @@
+"duplicateSuppression": { "time": 2, "unit": "hours" },
+"smtp": [
+ {
+ "subject": "[{{ name }}] alert",
+ "recipients": [
+ "{{ alert.recipients|join('","') }}"
+ ],
+ "sender": "{{ alert.sender }}",
+ "severityFilter": "{{ alert.levels|join('|') }}",
+ "smtpClientConfig": {
+ "smtpHost": "{{ alert.smtp.host }}",
+ "smtpPort": {{ alert.smtp.port }},
+ "useSsl": {{ alert.smtp.ssl|lower }},
+ {% if alert.smtp.username or alert.smtp.password %}
+ "authentication": {
+ "username": "{{ alert.smtp.username }}",
+ "password": "{{ alert.smtp.password }}"
+ }
+ {% else %}
+ "authentication": null
+ {% endif %}
+ }
+ }
+]
diff --git a/templates/common/dotenv b/templates/common/dotenv
new file mode 100644
index 0000000..a45f249
--- /dev/null
+++ b/templates/common/dotenv
@@ -0,0 +1 @@
+COMPOSE_PROJECT_NAME={{ name }}
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 0000000..037fc0d
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,4 @@
+amulet
+juju
+juju-deployer
+requests
diff --git a/tests/10-deploy.py b/tests/10-deploy.py
new file mode 100755
index 0000000..faae8a7
--- /dev/null
+++ b/tests/10-deploy.py
@@ -0,0 +1,257 @@
+#!/usr/bin/env python3.5
+import amulet
+from amulet_utils import attach_resource, has_resource
+import asyncio
+import json
+from juju.errors import JujuAPIError
+from juju.model import Model
+from juju.client.connection import JujuData
+import logging
+import os
+import re
+import requests
+import unittest
+
+log = logging.getLogger(__name__)
+
+SCALABLE_CHARM = "ubuntu"
+
+
+def download_resource(url):
+ resource_path = "/tmp/charmscaler-docker-images.tar"
+
+ r = requests.get(url, stream=True)
+ r.raise_for_status()
+
+ log.info("Downloading resource {} to {}".format(url, resource_path))
+
+ with open(resource_path, 'wb') as f:
+ for chunk in r.iter_content(chunk_size=1024):
+ if chunk:
+ f.write(chunk)
+
+ return resource_path
+
+
+def _get_juju_credentials():
+ jujudata = JujuData()
+
+ controller_name = jujudata.current_controller()
+
+ controller = jujudata.controllers()[controller_name]
+ endpoint = controller["api-endpoints"][0]
+
+ models = jujudata.models()[controller_name]
+ model_name = models["current-model"]
+ model_uuid = models["models"][model_name]["uuid"]
+
+ accounts = jujudata.accounts()[controller_name]
+ username = accounts["user"]
+ password = accounts.get("password")
+
+ return {
+ "endpoint": endpoint,
+ "model_uuid": model_uuid,
+ "username": username,
+ "password": password
+ }
+
+
+class TestCharm(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.loop = asyncio.get_event_loop()
+
+ cls.d = amulet.Deployment(series="xenial")
+
+ credentials = _get_juju_credentials()
+
+ cls.d.add("charmscaler")
+
+ cls.d.configure("charmscaler", {
+ "juju_api_endpoint": credentials["endpoint"],
+ "juju_model_uuid": credentials["model_uuid"],
+ "juju_username": credentials["username"],
+ "juju_password": credentials["password"],
+ "scaling_units_min": 1,
+ "scaling_units_max": 1
+ })
+
+ cls.d.add("influxdb", charm="cs:~chris.macnaughton/influxdb")
+ cls.d.add("telegraf")
+ cls.d.add(SCALABLE_CHARM)
+
+ cls.d.relate("charmscaler:db-api", "influxdb:api")
+ cls.d.relate("telegraf:influxdb-api", "influxdb:api")
+ cls.d.relate("telegraf:juju-info",
+ "{}:juju-info".format(SCALABLE_CHARM))
+ cls.d.relate("charmscaler:scalable-charm",
+ "{}:juju-info".format(SCALABLE_CHARM))
+
+ try:
+ cls.d.setup(timeout=900)
+ cls.d.sentry.wait()
+ except amulet.helpers.TimeoutError:
+ message = "Environment wasn't stood up in time"
+ amulet.raise_status(amulet.SKIP, msg=message)
+
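+        # Attach the docker-images resource unless it is already in place:
+        # prefer the CHARMSCALER_RESOURCE environment variable (URL or local
+        # path) and fall back to the resource published in the charm store.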
+ if not has_resource("charmscaler", "docker-images"):
+ resource = os.getenv("CHARMSCALER_RESOURCE")
+ if resource:
+ try:
+ cls.resource_path = download_resource(resource)
+ except requests.exceptions.RequestException:
+ if os.path.isfile(resource):
+ cls.resource_path = resource
+ else:
+ message = "resource '{}' not found".format(resource)
+ amulet.raise_status(amulet.FAIL, msg=message)
+ else:
+ url = ("https://api.jujucharms.com/charmstore/v5/"
+ "~elastisys/charmscaler/resource/docker-images")
+ cls.resource_path = download_resource(url)
+ attach_resource("charmscaler", "docker-images", cls.resource_path)
+
+ try:
+ cls.d.sentry.wait_for_messages({"charmscaler": "Available"})
+ except amulet.helpers.TimeoutError:
+ message = "CharmScaler charm did not become available in time"
+ amulet.raise_status(amulet.FAIL, msg=message)
+
+ def _configure(self, config):
+ self.d.configure("charmscaler", config)
+ try:
+ self.d.sentry.wait_for_messages({"charmscaler": "Available"})
+ except amulet.helpers.TimeoutError:
+ message = "Timeout configuring charmscaler: {}".format(config)
+ amulet.raise_status(amulet.FAIL, msg=message)
+
+ async def _wait_for_unit_count(self, expected_units, timeout=300):
+ m = Model()
+ await m.connect_current()
+ try:
+ for i in amulet.helpers.timeout_gen(timeout):
+ actual_units = len(m.applications[SCALABLE_CHARM].units)
+ if actual_units == expected_units:
+ break
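+                # Yield to the event loop so that libjuju's watcher task can
+                # refresh the cached model state between checks.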
+ await asyncio.sleep(0)
+ finally:
+ await m.disconnect()
+
+ def _manual_scale(self, unit_count):
+ log.info("Scaling '{}' to {} unit(s)...".format(SCALABLE_CHARM,
+ unit_count))
+
+ self._configure({
+ "scaling_units_min": unit_count,
+ "scaling_units_max": unit_count
+ })
+
+ try:
+ self.loop.run_until_complete(self._wait_for_unit_count(unit_count))
+ except amulet.helpers.TimeoutError:
+ msg = ("The CharmScaler did not scale the application '{}' to {} "
+ "unit(s) in time.").format(SCALABLE_CHARM, unit_count)
+ amulet.raise_status(amulet.FAIL, msg=msg)
+ except JujuAPIError as e:
+ msg = ("Juju API error: {}").format(str(e))
+ amulet.raise_status(amulet.FAIL, msg=msg)
+
+ def test_scaling(self):
+ self._manual_scale(2)
+ self._manual_scale(4)
+ self._manual_scale(1)
+
+ def test_restricted(self):
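+        # The free edition limits the scaled application to four units, so
+        # a max value of 5 should be refused with a status message.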
+ self.d.configure("charmscaler", {
+ "scaling_units_max": 5
+ })
+ try:
+ self.d.sentry.wait_for_messages({
+ "charmscaler":
+ re.compile(r"Refusing to set a capacity limit max value")
+ })
+ except amulet.helpers.TimeoutError:
+ message = "Never got restricted status message from charmscaler"
+ amulet.raise_status(amulet.FAIL, msg=message)
+
+ self._configure({
+ "scaling_units_max": 4
+ })
+
+ def _run_action(self, action, action_args):
+ charmscaler = self.d.sentry["charmscaler"][0]
+ action_id = charmscaler.run_action(action, action_args=action_args)
+ log.info("Running action {} with ID {}".format(action, action_id))
+
+ try:
+ output = self.d.action_fetch(action_id, raise_on_timeout=True,
+ full_output=True)
+
+ message = "" if "message" not in output else output["message"]
+ self.assertEqual(output["status"], "completed", message)
+
+            return output.get("results")
+ except amulet.helpers.TimeoutError:
+ message = "Timeout while executing action {}".format(action)
+ amulet.raise_status(amulet.FAIL, msg=message)
+
+ def test_alert_mails(self):
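+        # Start a local SMTP server through the smtpserver action, point the
+        # charm's alerting at it, then misconfigure the charmpool URL to
+        # provoke an alert and poll the inbox until a mail arrives.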
+ ports = self._run_action("smtpserver", {
+ "operation": "start"
+ })["ports"]
+ self.addCleanup(self._run_action, "smtpserver", {
+ "operation": "stop",
+ "port": ports["rpc"]
+ })
+ self.addCleanup(self._configure, {
+ "alert_enabled": False,
+ "charmpool_url": "http://charmpool:80"
+ })
+
+ # The SMTP server is running on the host while the autoscaler is
+ # running inside a Docker container
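+        # The gateway of the container's Docker network is the host-side
+        # address that is reachable from inside the container.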
+ docker_inspect_output = json.loads(self._run_action("docker-inspect", {
+ "container": "autoscaler"
+ })["output"])
+ docker_host_ip = (docker_inspect_output[0]["NetworkSettings"]
+ ["Networks"]["charmscaler_default"]["Gateway"])
+
+ self._configure({
+ "alert_enabled": True,
+ "alert_levels": "INFO NOTICE WARN ERROR FATAL",
+ "alert_receivers": "foo@charmscaler",
+ "alert_sender": "bar@charmscaler",
+ "alert_smtp_host": docker_host_ip,
+ "alert_smtp_port": ports["smtp"],
+ "alert_smtp_ssl": False,
+ "alert_smtp_username": "foo",
+ "alert_smtp_password": "bar"
+ })
+
+ self._configure({
+ "charmpool_url": "http://not-charmpool:1234"
+ })
+
+        try:
+            for _ in amulet.helpers.timeout_gen(300):
+                count = self._run_action("smtpserver", {
+                    "operation": "inboxcount",
+                    "port": ports["rpc"]
+                })["count"]
+
+                if int(count) > 0:
+                    break
+        except amulet.helpers.TimeoutError:
+            message = "Timed out waiting for an alert mail to arrive"
+            amulet.raise_status(amulet.FAIL, msg=message)
+
+ @classmethod
+ def tearDownClass(cls):
+ for task in asyncio.Task.all_tasks():
+ task.cancel()
+ cls.loop.close()
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.INFO)
+ logging.getLogger("websocket").setLevel(logging.WARNING)
+ logging.getLogger("websockets.protocol").setLevel(logging.WARNING)
+ logging.getLogger("deployer").setLevel(logging.WARNING)
+ unittest.main()
diff --git a/tests/amulet_utils.py b/tests/amulet_utils.py
new file mode 100644
index 0000000..ee68e89
--- /dev/null
+++ b/tests/amulet_utils.py
@@ -0,0 +1,37 @@
+# Credit to https://github.com/juju-solutions/bundle-canonical-kubernetes/blob/master/tests/amulet_utils.py # noqa
+
+import subprocess
+import yaml
+
+
+def attach_resource(charm, resource, resource_path):
+    '''Upload a resource to an application in the deployed model.
+    :param: charm - the application to attach the resource to
+    :param: resource - the charm's resource key
+    :param: resource_path - the path on disk of the resource to upload'''
+
+ # the primary reason for this method is to replace a shell
+ # script in the $ROOT dir of the charm
+ cmd = ['juju', 'attach', charm, "{}={}".format(resource, resource_path)]
+
+ # Poll the controller to determine if resource placement is needed
+ if not has_resource(charm, resource):
+ subprocess.call(cmd)
+
+
+def has_resource(charm, resource):
+    '''Poll the controller to determine whether the resource has already
+    been uploaded for the given application.'''
+    cmd = ['juju', 'resources', charm, '--format=yaml']
+    output = subprocess.check_output(cmd)
+    resource_list = yaml.safe_load(output)
+    for res in resource_list['resources']:
+        # We can assume this is the correct resource if its name matches the
+        # requested resource key and it has a non-zero file size.
+        if res.get('name') == resource and res.get('size', 0) > 0:
+            # Display the found resource
+            print('Found resource {} for {}'.format(res['name'], charm))
+            return True
+    return False
diff --git a/tests/tests.yaml b/tests/tests.yaml
new file mode 100644
index 0000000..5712a65
--- /dev/null
+++ b/tests/tests.yaml
@@ -0,0 +1,10 @@
+reset: true
+reset_timeout: 300
+tests: "[0-9]*"
+makefile:
+ - lint
+ - unit_test
+virtualenv: true
+virtualenv_python: python3.5
+requirements:
+ - test-requirements.txt
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..7a1c890
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,21 @@
+[tox]
+envlist = py35
+skipsdist = True
+
+[testenv]
+deps =
+ backoff
+ charmhelpers
+ charms.docker
+ pytest
+ requests
+ requests_mock
+
+commands = py.test {posargs}
+
+[testenv:lint]
+basepython = python3.5
+deps = flake8
+
+commands = flake8 {toxinidir}/actions {toxinidir}/reactive \
+ {toxinidir}/tests {toxinidir}/unit_tests
diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/unit_tests/test_autoscaler.py b/unit_tests/test_autoscaler.py
new file mode 100644
index 0000000..c411606
--- /dev/null
+++ b/unit_tests/test_autoscaler.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+
+from requests.exceptions import RequestException
+import requests_mock
+import unittest
+import unittest.mock as mock
+
+# Disable backoff retry calls
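+# so that mocked HTTP failures surface immediately instead of being retried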
+import backoff
+def noop_decorator(wait_gen, exception, max_tries=None, jitter=None): # noqa
+ def decorator(f):
+ return f
+ return decorator
+backoff.on_exception = noop_decorator # noqa
+
+from reactive.autoscaler import Autoscaler
+
+
+class TestAutoscaler(unittest.TestCase):
+ @classmethod
+ @mock.patch.dict("os.environ", {
+ "JUJU_UNIT_NAME": "openstackscaler/1",
+ "CHARM_DIR": "/tmp"
+ })
+ @mock.patch("reactive.component.Config")
+ @mock.patch("reactive.component.Compose")
+ def setUpClass(cls, mock_compose, mock_config):
+ cls.autoscaler = Autoscaler({
+ "name": "OpenStackScaler",
+ "port_autoscaler": 8080
+ }, "latest")
+
+ def setUp(self):
+ patcher = mock.patch("reactive.component.HTTP_RETRY_LIMIT")
+ self.addCleanup(patcher.stop)
+ retry_limit_patch = patcher.start()
+ retry_limit_patch.return_value = 0
+
+ @requests_mock.mock()
+ @mock.patch("reactive.autoscaler.Config")
+ def test_initialize(self, mock_req, mock_config):
+ initialize_url = self.autoscaler._get_url("initialize")
+ instance_status_url = self.autoscaler._get_url("status")
+
+        # OK initialization
+ mock_req.post(initialize_url, status_code=200)
+ self.autoscaler.initialize()
+ self.assertEqual(mock_req.call_count, 1)
+
+ # Initialization error
+ mock_req.post(initialize_url, status_code=500)
+ self.assertRaises(RequestException,
+ self.autoscaler.initialize)
+ self.assertEqual(mock_req.call_count, 2)
+
+ # Instance already created
+ mock_req.post(initialize_url, status_code=400)
+ mock_req.get(instance_status_url, status_code=200)
+ self.autoscaler.initialize()
+ self.assertEqual(mock_req.call_count, 4)
+
+ # Instance already created with error on status lookup
+ mock_req.post(initialize_url, status_code=400)
+ mock_req.get(instance_status_url, status_code=500)
+ self.assertRaises(RequestException,
+ self.autoscaler.initialize)
+ self.assertEqual(mock_req.call_count, 6)
+
+ @requests_mock.mock()
+ def test_start(self, mock_req):
+ url = self.autoscaler._get_url("start")
+
+ # Start OK
+ mock_req.post(url, status_code=200)
+ self.autoscaler.start()
+ self.assertEqual(mock_req.call_count, 1)
+
+ # Start error
+ mock_req.post(url, status_code=500)
+ self.assertRaises(RequestException, self.autoscaler.start)
+ self.assertEqual(mock_req.call_count, 2)
+
+ @requests_mock.mock()
+ def test_stop(self, mock_req):
+ url = self.autoscaler._get_url("stop")
+
+ # Stop OK
+ mock_req.post(url, status_code=200)
+ self.autoscaler.stop()
+ self.assertEqual(mock_req.call_count, 1)
+
+ # Stop error
+ mock_req.post(url, status_code=500)
+ self.assertRaises(RequestException, self.autoscaler.stop)
+ self.assertEqual(mock_req.call_count, 2)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/unit_tests/test_component.py b/unit_tests/test_component.py
new file mode 100644
index 0000000..6749d4c
--- /dev/null
+++ b/unit_tests/test_component.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+from requests.exceptions import RequestException
+import requests_mock
+import tempfile
+import unittest
+import unittest.mock as mock
+
+from reactive.component import ConfigComponent, DockerComponent, HTTPComponent
+
+
+class TestDockerComponent(unittest.TestCase):
+ @classmethod
+ @mock.patch("reactive.component.Config")
+ def setUpClass(cls, mock_config):
+ cls.component = DockerComponent("test-component")
+
+ def test_healthcheck(self):
+ # Not much to test at the moment. Important test in charms.docker.
+ pass
+
+ @mock.patch("reactive.component.Config")
+ def test_compose(self, mock_config):
+ self.component._up = mock.MagicMock()
+ self.component.healthcheck = mock.MagicMock()
+
+ # Normal configuration procedure when config has changed
+ self.component.compose_config.has_changed.return_value = True
+ self.component.compose()
+ self.assertTrue(self.component.compose_config.has_changed.called)
+ self.assertTrue(self.component.compose_config.render.called)
+ self.assertTrue(self.component.compose_config.commit.called)
+ self.assertTrue(self.component._up.called)
+ self.assertTrue(self.component.healthcheck.called)
+
+        # Nothing should happen if the compose configuration is unchanged
+        self.component.compose_config.reset_mock()
+        self.component._up.reset_mock()
+        self.component.healthcheck.reset_mock()
+        self.component.compose_config.has_changed.return_value = False
+        self.component.compose()
+        self.assertTrue(self.component.compose_config.has_changed.called)
+        self.assertFalse(self.component.compose_config.render.called)
+        self.assertFalse(self.component.compose_config.commit.called)
+        self.assertFalse(self.component._up.called)
+        self.assertFalse(self.component.healthcheck.called)
+
+
+class TestHTTPComponent(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.component = HTTPComponent("test-component", 1337, {
+ "status": "status",
+ })
+
+ def test_paths(self):
+ # Existing path
+ url = "http://localhost:{port}/{path}".format(
+ port=self.component.port,
+ path="status"
+ )
+ self.assertEqual(self.component._get_url("status"), url)
+
+ # Missing path
+ self.assertRaises(NotImplementedError, self.component._get_url, "_")
+
+
+class TestConfigComponent(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ with mock.patch("reactive.component.Config"):
+ cls.component = ConfigComponent("test-component", 1337, {
+ "status": "status",
+ "configure": "configure"
+ })
+
+ @requests_mock.mock()
+ def test_configure(self, mock_req):
+ with tempfile.NamedTemporaryFile() as config_file:
+ self.component.config.path = config_file.name
+ url = self.component._get_url("configure")
+ mock_req.post(url, status_code=200)
+
+ # Normal configuration procedure when config has changed
+ self.component.config.has_changed.return_value = True
+ self.component.configure()
+ self.assertTrue(self.component.config.has_changed.called)
+ self.assertTrue(self.component.config.render.called)
+ self.assertTrue(self.component.config.commit.called)
+ self.assertEqual(mock_req.call_count, 1)
+
+ self.component.config.reset_mock()
+
+ # Nothing should happen if the config has not changed
+ self.component.config.has_changed.return_value = False
+ self.component.configure()
+ self.assertTrue(self.component.config.has_changed.called)
+ self.assertFalse(self.component.config.render.called)
+ self.assertFalse(self.component.config.commit.called)
+ self.assertEqual(mock_req.call_count, 1)
+
+ self.component.config.reset_mock()
+
+ # Invalid config
+ self.component.config.has_changed.return_value = True
+ mock_req.post(url, status_code=400)
+ self.assertRaises(RequestException, self.component.configure)
+ self.assertEqual(mock_req.call_count, 2)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/unit_tests/test_config.py b/unit_tests/test_config.py
new file mode 100644
index 0000000..a408b63
--- /dev/null
+++ b/unit_tests/test_config.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+import unittest
+import unittest.mock as mock
+
+from reactive.config import Config, ConfigurationException, required
+
+
+class TestConfig(unittest.TestCase):
+ def setUp(self):
+ patcher = mock.patch("reactive.config.charm_dir")
+ self.addCleanup(patcher.stop)
+ patcher.start()
+
+ def test_extend(self):
+ cfg = Config("test-config", "path")
+
+ # Initial config
+ self.assertEqual(cfg._config, {})
+
+ # Extended config
+ def extend_func(data):
+ return {
+ "some": "stuff",
+ "more": data
+ }
+ cfg.extend(extend_func, "stuff")
+ self.assertEqual(cfg._config, {
+ "some": "stuff",
+ "more": "stuff"
+ })
+
+ def test_has_changed(self):
+ cfg = Config("test-config", "path")
+
+ # Initial config
+ self.assertTrue(cfg.has_changed())
+
+ # Changed config
+ cfg.extend(lambda: {"more": "stuff"})
+ self.assertTrue(cfg.has_changed())
+
+        # Uncommitted config
+ self.assertTrue(cfg.has_changed())
+
+ # Unchanged config after commit
+ cfg.commit()
+ self.assertFalse(cfg.has_changed())
+
+ def test_empty_required_value(self):
+ cfg = Config("test-config", "path")
+ some_data = {"some": "stuff", "more": None}
+ self.assertRaises(ConfigurationException, cfg.extend, lambda data: {
+ "some": data["some"],
+ "more": required(data, "more")
+ }, some_data)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/wheelhouse.txt b/wheelhouse.txt
new file mode 100644
index 0000000..e46244f
--- /dev/null
+++ b/wheelhouse.txt
@@ -0,0 +1,7 @@
+requests>=2.11.1,<3.0.0
+backoff>=1.3.2,<2.0.0
+
+# Tests only
+-e git://github.com/simonklb/aiosmtpd.git@merged#egg=aiosmtpd
+aiozmq
+msgpack-python