From b2d91c6cf164b5e46109a622eccb4c9f5e53b6d0 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sun, 19 Mar 2017 02:03:16 +0000 Subject: [PATCH 01/15] readme updates (note actions and bundle deployments); lint fixes for actions; show app version in status output --- .../src/charm/hbase/layer-hbase/README.md | 91 +++++++++++++------ .../charm/hbase/layer-hbase/actions/restart | 7 +- .../src/charm/hbase/layer-hbase/actions/start | 7 +- .../layer-hbase/actions/start-hbase-master | 2 +- .../actions/start-hbase-regionserver | 2 +- .../src/charm/hbase/layer-hbase/actions/stop | 7 +- .../layer-hbase/actions/stop-hbase-master | 2 +- .../actions/stop-hbase-regionserver | 2 +- .../src/charm/hbase/layer-hbase/metadata.yaml | 4 +- .../charm/hbase/layer-hbase/reactive/hbase.py | 7 +- 10 files changed, 88 insertions(+), 43 deletions(-) diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/README.md b/bigtop-packages/src/charm/hbase/layer-hbase/README.md index a63ddf83a..d5b714a4d 100644 --- a/bigtop-packages/src/charm/hbase/layer-hbase/README.md +++ b/bigtop-packages/src/charm/hbase/layer-hbase/README.md @@ -16,7 +16,7 @@ --> # Overview -HBase is the Hadoop database. Think of it as a distributed scalable Big Data +HBase is the Hadoop database. Think of it as a distributed, scalable Big Data store. Use HBase when you need random, realtime read/write access to your Big Data. @@ -64,30 +64,21 @@ In a distributed HBase environment, one master and one regionserver are deployed on each unit. HBase makes sure that only one master is active and the rest are in standby mode in case the active one fails. 
-HBase operates over HDFS, so we first need to deploy an HDFS cluster: +Because HBase requires HDFS, this charm is recommended to be deployed as part +of the `hadoop-hbase` bundle: - juju deploy hadoop-namenode namenode - juju deploy hadoop-slave slave - juju deploy hadoop-plugin plugin + juju deploy hadoop-hbase - juju add-relation namenode slave - juju add-relation plugin namenode - -In order to function correctly, the HBase master and regionserver applications -have a mandatory relationship with Zookeeper. Use the zookeeper charm to -create a functional zookeeper quorum. Remember that quorums come in odd numbers -starting with 3 (one will work, but will offer no resilience): - - juju deploy zookeeper -n 3 - -Now add HBase scaled to 3 units and add the required relations: - - juju deploy hbase -n 3 +> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version +of Juju, use [juju-quickstart][] with the following syntax: `juju quickstart +hadoop-hbase`. - juju add-relation plugin hbase - juju add-relation zookeeper hbase +This will deploy an Apache Bigtop Hadoop cluster with 3 HBase units. More +information about this deployment can be found in the +[bundle readme](https://jujucharms.com/hadoop-hbase/). -The charm also supports use of the thrift gateway. +This charm also supports the Thrift client API for HBase. Thrift is both +cross-platform and more lightweight than REST for many operations. ## Network-Restricted Environments Charms can be deployed in environments with limited network access. To deploy @@ -95,6 +86,7 @@ in this environment, configure a Juju model with appropriate proxy and/or mirror options. See [Configuring Models][] for more information. 
[getting-started]: https://jujucharms.com/docs/stable/getting-started +[juju-quickstart]: https://launchpad.net/juju-quickstart [Configuring Models]: https://jujucharms.com/docs/stable/models-config @@ -140,14 +132,61 @@ more information about a specific smoke test with: > **Note**: The above assumes Juju 2.0 or greater. If using an earlier version of Juju, the syntax is `juju action fetch `. +## HBase web UI +HBase provides a web console that can be used to verify information about +the cluster. To access it, find the `PUBLIC-ADDRESS` of any hbase unit and +expose the application: + + juju status hbase + juju expose hbase + +The web interface will be available at the following URL: + + http://HBASE_PUBLIC_IP:60010 + + +# Using + +Once the deployment has been verified, there are a number of actions available +in this charm. +> **Note**: Actions described below assume Juju 2.0 or greater. If using an +earlier version of Juju, the action syntax is: +`juju action do hbase/0 ; juju action fetch `. + +Run a performance test: + + juju run-action hbase/0 perf-test + juju show-action-output # <-- id from above command + +Run a smoke test (as described in the above **Verifying** section): + + juju run-action hbase/0 smoke-test + juju show-action-output # <-- id from above command + +Start/Stop/Restart all HBase services on a unit: + + juju run-action hbase/0 [start|stop|restart] + juju show-action-output # <-- id from above command + + +Start/Stop the HBase Master service on a unit: + + juju run-action hbase/0 [start|stop]-hbase-master + juju show-action-output # <-- id from above command + +Start/Stop the HBase RegionServer and Thrift services on a unit: + + juju run-action hbase/0 [start|stop]-hbase-regionserver + juju show-action-output # <-- id from above command + # Limitations Restarting an HBase deployment is potentially disruptive. Be aware that the following events will cause a restart: -- Zookeeper service units joining or departing relations. 
-- Upgrading the charm or changing the configuration. +- Zookeeper units joining or departing the quorum. +- Upgrading the hbase charm. # Contact Information @@ -157,10 +196,10 @@ following events will cause a restart: # Resources -- [Apache Bigtop](http://bigtop.apache.org/) home page -- [Apache Bigtop mailing lists](http://bigtop.apache.org/mail-lists.html) - [Apache HBase home page](https://hbase.apache.org/) -- [Apache Zookeeper issue tracker](https://issues.apache.org/jira/browse/HBASE) +- [Apache HBase issue tracker](https://issues.apache.org/jira/browse/HBASE) +- [Apache Bigtop home page](http://bigtop.apache.org/) +- [Apache Bigtop mailing lists](http://bigtop.apache.org/mail-lists.html) - [Juju Bigtop charms](https://jujucharms.com/q/apache/bigtop) - [Juju mailing list](https://lists.ubuntu.com/mailman/listinfo/juju) - [Juju community](https://jujucharms.com/community) diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart b/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart index c798db733..9ce0a62d8 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/restart @@ -18,15 +18,16 @@ import sys sys.path.append('lib') -from charmhelpers.core import hookenv -from charms.reactive import is_state -from charms.layer.bigtop_hbase import HBase +from charmhelpers.core import hookenv # noqa: E402 +from charms.reactive import is_state # noqa: E402 +from charms.layer.bigtop_hbase import HBase # noqa: E402 def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start index 9a6c91b08..9a5a473c4 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start @@ -18,15 +18,16 @@ import sys sys.path.append('lib') 
-from charmhelpers.core import hookenv -from charms.reactive import is_state -from charms.layer.bigtop_hbase import HBase +from charmhelpers.core import hookenv # noqa: E402 +from charms.reactive import is_state # noqa: E402 +from charms.layer.bigtop_hbase import HBase # noqa: E402 def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master index 85aa926ce..13bc1bc2a 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-master @@ -16,7 +16,6 @@ # limitations under the License. import sys -sys.path.append('lib') from charmhelpers.core import host, hookenv from charms.reactive import is_state @@ -26,6 +25,7 @@ def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver index aaa2ad763..b7c01a7dc 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/start-hbase-regionserver @@ -16,7 +16,6 @@ # limitations under the License. 
import sys -sys.path.append('lib') from charmhelpers.core import host, hookenv from charms.reactive import is_state @@ -26,6 +25,7 @@ def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop index 7cc16ffdb..8bdbad6b0 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop @@ -18,15 +18,16 @@ import sys sys.path.append('lib') -from charmhelpers.core import hookenv -from charms.reactive import is_state -from charms.layer.bigtop_hbase import HBase +from charmhelpers.core import hookenv # noqa: E402 +from charms.reactive import is_state # noqa: E402 +from charms.layer.bigtop_hbase import HBase # noqa: E402 def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master index 293611865..027908920 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-master @@ -16,7 +16,6 @@ # limitations under the License. 
import sys -sys.path.append('lib') from charmhelpers.core import host, hookenv from charms.reactive import is_state @@ -26,6 +25,7 @@ def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver index a203dbead..862770ff6 100755 --- a/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver +++ b/bigtop-packages/src/charm/hbase/layer-hbase/actions/stop-hbase-regionserver @@ -16,7 +16,6 @@ # limitations under the License. import sys -sys.path.append('lib') from charmhelpers.core import host, hookenv from charms.reactive import is_state @@ -26,6 +25,7 @@ def fail(msg): hookenv.action_fail(msg) sys.exit() + if not is_state('hbase.installed'): fail('HBase is not yet ready') diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml b/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml index 9157ad1f4..344e6397e 100644 --- a/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml +++ b/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml @@ -2,8 +2,8 @@ name: hbase summary: Apache Bitop HBase maintainer: Juju Big Data description: > - HBase is the Hadoop database. This charm provides a Apache HBase from - Apache Bigtop. + HBase is the Hadoop database. This charm provides HBase from the + Apache Bigtop project. 
tags: [] requires: zookeeper: diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py b/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py index b11cac23a..a7a1eb4a3 100644 --- a/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py +++ b/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py @@ -17,7 +17,7 @@ from charms.layer.bigtop_hbase import HBase from charmhelpers.core import hookenv from charms.reactive.helpers import data_changed -from charms.layer.apache_bigtop_base import get_layer_opts +from charms.layer.apache_bigtop_base import get_layer_opts, get_package_version @when('bigtop.available') @@ -60,7 +60,10 @@ def installing_hbase(zk, hdfs): hbase.configure(hosts, zks) hbase.open_ports() set_state('hbase.installed') - hookenv.status_set('active', 'ready') + report_status() + # set app version string for juju status output + hbase_version = get_package_version('hbase-master') or 'unknown' + hookenv.application_version_set(hbase_version) @when('hbase.installed') From ac9ed8537abaecb46cf94eb7927cffd9fe736531 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sun, 19 Mar 2017 14:47:48 +0000 Subject: [PATCH 02/15] adding hadoop-hbase bundle --- bigtop-deploy/juju/hadoop-hbase/.gitignore | 2 + bigtop-deploy/juju/hadoop-hbase/README.md | 329 ++++++++++++++++++ .../juju/hadoop-hbase/bundle-dev.yaml | 131 +++++++ .../juju/hadoop-hbase/bundle-local.yaml | 131 +++++++ bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 131 +++++++ bigtop-deploy/juju/hadoop-hbase/copyright | 16 + .../juju/hadoop-hbase/tests/01-bundle.py | 126 +++++++ .../juju/hadoop-hbase/tests/tests.yaml | 7 + 8 files changed, 873 insertions(+) create mode 100644 bigtop-deploy/juju/hadoop-hbase/.gitignore create mode 100644 bigtop-deploy/juju/hadoop-hbase/README.md create mode 100644 bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml create mode 100644 bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml create mode 100644 
bigtop-deploy/juju/hadoop-hbase/bundle.yaml create mode 100644 bigtop-deploy/juju/hadoop-hbase/copyright create mode 100755 bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py create mode 100644 bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml diff --git a/bigtop-deploy/juju/hadoop-hbase/.gitignore b/bigtop-deploy/juju/hadoop-hbase/.gitignore new file mode 100644 index 000000000..a295864e3 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/.gitignore @@ -0,0 +1,2 @@ +*.pyc +__pycache__ diff --git a/bigtop-deploy/juju/hadoop-hbase/README.md b/bigtop-deploy/juju/hadoop-hbase/README.md new file mode 100644 index 000000000..b45bf7bc2 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/README.md @@ -0,0 +1,329 @@ + +# Overview + +The Apache Hadoop software library is a framework that allows for the +distributed processing of large data sets across clusters of computers +using a simple programming model. + +Hadoop is designed to scale from a few servers to thousands of machines, +each offering local computation and storage. Rather than rely on hardware +to deliver high-availability, Hadoop can detect and handle failures at the +application layer. This provides a highly-available service on top of a cluster +of machines, each of which may be prone to failure. + +HBase is the Hadoop database. Think of it as a distributed, scalable Big Data +store. + +This bundle provides a complete deployment of Hadoop and HBase components from +[Apache Bigtop][] that performs distributed data processing at scale. Ganglia +and rsyslog applications are also provided to monitor cluster health and syslog +activity. 
+ +[Apache Bigtop]: http://bigtop.apache.org/ + +## Bundle Composition + +The applications that comprise this bundle are spread across 8 units as +follows: + + * NameNode (HDFS) + * ResourceManager (YARN) + * Colocated on the NameNode unit + * Zookeeper + * 3 separate units + * Slave (DataNode and NodeManager) + * 3 separate units + * HBase + * 3 units colocated with the Hadoop Slaves + * Client (Hadoop endpoint) + * Plugin (Facilitates communication with the Hadoop cluster) + * Subordinate to the HBase and Client units + * Ganglia (Web interface for monitoring cluster metrics) + * Colocated on the Client unit + * Rsyslog (Aggregate cluster syslog events in a single location) + * Colocated on the Client unit + +Deploying this bundle results in a fully configured Apache Bigtop +cluster on any supported cloud, which can be scaled to meet workload +demands. + + +# Deploying + +A working Juju installation is assumed to be present. If Juju is not yet set +up, please follow the [getting-started][] instructions prior to deploying this +bundle. + +> **Note**: This bundle requires hardware resources that may exceed limits +of Free-tier or Trial accounts on some clouds. To deploy to these +environments, modify a local copy of [bundle.yaml][] to set +`services: 'X': num_units: 1` and `machines: 'X': constraints: mem=3G` as +needed to satisfy account limits. + +Deploy this bundle from the Juju charm store with the `juju deploy` command: + + juju deploy hadoop-hbase + +> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version +of Juju, use [juju-quickstart][] with the following syntax: `juju quickstart +hadoop-hbase`. + +Alternatively, deploy a locally modified `bundle.yaml` with: + + juju deploy /path/to/bundle.yaml + +> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version +of Juju, use [juju-quickstart][] with the following syntax: `juju quickstart +/path/to/bundle.yaml`. 
+ +The charms in this bundle can also be built from their source layers in the +[Bigtop charm repository][]. See the [Bigtop charm README][] for instructions +on building and deploying these charms locally. + +## Network-Restricted Environments +Charms can be deployed in environments with limited network access. To deploy +in this environment, configure a Juju model with appropriate proxy and/or +mirror options. See [Configuring Models][] for more information. + +[getting-started]: https://jujucharms.com/docs/stable/getting-started +[bundle.yaml]: https://github.com/apache/bigtop/blob/master/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +[juju-quickstart]: https://launchpad.net/juju-quickstart +[Bigtop charm repository]: https://github.com/apache/bigtop/tree/master/bigtop-packages/src/charm +[Bigtop charm README]: https://github.com/apache/bigtop/blob/master/bigtop-packages/src/charm/README.md +[Configuring Models]: https://jujucharms.com/docs/stable/models-config + + +# Verifying + +## Status +The applications that make up this bundle provide status messages to indicate +when they are ready: + + juju status + +This is particularly useful when combined with `watch` to track the on-going +progress of the deployment: + + watch -n 2 juju status + +The message for each unit will provide information about that unit's state. +Once they all indicate that they are ready, perform application smoke tests +to verify that the bundle is working as expected. + +## Smoke Test +The charms for each core component (namenode, resourcemanager, slave, hbase, +and zookeeper) provide a `smoke-test` action that can be used to verify the +application is functioning as expected. Note that the 'slave' component runs +extensive tests provided by Apache Bigtop and may take up to 30 minutes to +complete. 
Run the smoke-test actions as follows: + + juju run-action namenode/0 smoke-test + juju run-action resourcemanager/0 smoke-test + juju run-action slave/0 smoke-test + juju run-action hbase/0 smoke-test + juju run-action zookeeper/0 smoke-test + +> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version +of Juju, the syntax is `juju action do /0 smoke-test`. + +Watch the progress of the smoke test actions with: + + watch -n 2 juju show-action-status + +> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version +of Juju, the syntax is `juju action status`. + +Eventually, all of the actions should settle to `status: completed`. If +any report `status: failed`, that application is not working as expected. Get +more information about a specific smoke test with: + + juju show-action-output + +> **Note**: The above assumes Juju 2.0 or greater. If using an earlier version +of Juju, the syntax is `juju action fetch `. + +## Utilities +Applications in this bundle include command line and web utilities that +can be used to verify information about the cluster. 
+ +From the command line, show the HDFS dfsadmin report and view the current list +of YARN NodeManager units with the following: + + juju run --application namenode "su hdfs -c 'hdfs dfsadmin -report'" + juju run --application resourcemanager "su yarn -c 'yarn node -list'" + +Show the list of Zookeeper nodes with the following: + + juju run --unit zookeeper/0 'echo "ls /" | /usr/lib/zookeeper/bin/zkCli.sh' + +To access the HDFS web console, find the `PUBLIC-ADDRESS` of the namenode +application and expose it: + + juju status namenode + juju expose namenode + +The web interface will be available at the following URL: + + http://NAMENODE_PUBLIC_IP:50070 + +Similarly, to access the Resource Manager web consoles, find the +`PUBLIC-ADDRESS` of the resourcemanager application and expose it: + + juju status resourcemanager + juju expose resourcemanager + +The YARN and Job History web interfaces will be available at the following URLs: + + http://RESOURCEMANAGER_PUBLIC_IP:8088 + http://RESOURCEMANAGER_PUBLIC_IP:19888 + +Finally, to access the HBase web console, find the `PUBLIC-ADDRESS` of any +hbase unit and expose the application: + + juju status hbase + juju expose hbase + +The web interface will be available at the following URL: + + http://HBASE_PUBLIC_IP:60010 + + +# Monitoring + +This bundle includes Ganglia for system-level monitoring of the namenode, +resourcemanager, slave, hbase, and zookeeper units. Metrics are sent to a +centralized ganglia unit for easy viewing in a browser. To view the ganglia web +interface, find the `PUBLIC-ADDRESS` of the Ganglia application and expose it: + + juju status ganglia + juju expose ganglia + +The web interface will be available at: + + http://GANGLIA_PUBLIC_IP/ganglia + + +# Logging + +This bundle includes rsyslog to collect syslog data from the namenode, +resourcemanager, slave, hbase, and zookeeper units. These logs are sent to a +centralized rsyslog unit for easy syslog analysis. 
One method of viewing this +log data is to simply cat syslog from the rsyslog unit: + + juju run --unit rsyslog/0 'sudo cat /var/log/syslog' + +Logs may also be forwarded to an external rsyslog processing service. See +the *Forwarding logs to a system outside of the Juju environment* section of +the [rsyslog README](https://jujucharms.com/rsyslog/) for more information. + + +# Benchmarking + +The `resourcemanager` charm in this bundle provide several benchmarks to gauge +the performance of the Hadoop cluster. Each benchmark is an action that can be +run with `juju run-action`: + + $ juju actions resourcemanager + ACTION DESCRIPTION + mrbench Mapreduce benchmark for small jobs + nnbench Load test the NameNode hardware and configuration + smoke-test Run an Apache Bigtop smoke test. + teragen Generate data with teragen + terasort Runs teragen to generate sample data, and then runs terasort to sort that data + testdfsio DFS IO Testing + + $ juju run-action resourcemanager/0 nnbench + Action queued with id: 55887b40-116c-4020-8b35-1e28a54cc622 + + $ juju show-action-output 55887b40-116c-4020-8b35-1e28a54cc622 + results: + meta: + composite: + direction: asc + units: secs + value: "128" + start: 2016-02-04T14:55:39Z + stop: 2016-02-04T14:57:47Z + results: + raw: '{"BAD_ID": "0", "FILE: Number of read operations": "0", "Reduce input groups": + "8", "Reduce input records": "95", "Map output bytes": "1823", "Map input records": + "12", "Combine input records": "0", "HDFS: Number of bytes read": "18635", "FILE: + Number of bytes written": "32999982", "HDFS: Number of write operations": "330", + "Combine output records": "0", "Total committed heap usage (bytes)": "3144749056", + "Bytes Written": "164", "WRONG_LENGTH": "0", "Failed Shuffles": "0", "FILE: + Number of bytes read": "27879457", "WRONG_MAP": "0", "Spilled Records": "190", + "Merged Map outputs": "72", "HDFS: Number of large read operations": "0", "Reduce + shuffle bytes": "2445", "FILE: Number of large read 
operations": "0", "Map output + materialized bytes": "2445", "IO_ERROR": "0", "CONNECTION": "0", "HDFS: Number + of read operations": "567", "Map output records": "95", "Reduce output records": + "8", "WRONG_REDUCE": "0", "HDFS: Number of bytes written": "27412", "GC time + elapsed (ms)": "603", "Input split bytes": "1610", "Shuffled Maps ": "72", "FILE: + Number of write operations": "0", "Bytes Read": "1490"}' + status: completed + timing: + completed: 2016-02-04 14:57:48 +0000 UTC + enqueued: 2016-02-04 14:55:14 +0000 UTC + started: 2016-02-04 14:55:27 +0000 UTC + +The `hbase` charm in this bundle also provides a benchmark to gauge +the performance of the HBase cluster: + + $ juju run-action hbase/0 perf-test + Action queued with id: 339cec1f-e903-4ee7-85ca-876fb0c3d28e + + $ juju show-action-output 339cec1f-e903-4ee7-85ca-876fb0c3d28e + results: + meta: + composite: + direction: asc + units: secs + value: "200.754000" + status: completed + timing: + completed: 2016-11-02 03:11:48 +0000 UTC + enqueued: 2016-11-02 03:08:21 +0000 UTC + started: 2016-11-02 03:08:26 +0000 UTC + + +# Scaling + +By default, three Hadoop slave, HBase, and Zookeeper units are deployed. +Scaling these applications is as simple as adding more units. To add one unit: + + juju add-unit slave + juju add-unit hbase + juju add-unit zookeeper + +Multiple units may be added at once. 
For example, add four more slave units: + + juju add-unit -n4 slave + + +# Contact Information + +- + + +# Resources + +- [Apache Bigtop home page](http://bigtop.apache.org/) +- [Apache Bigtop issue tracking](http://bigtop.apache.org/issue-tracking.html) +- [Apache Bigtop mailing lists](http://bigtop.apache.org/mail-lists.html) +- [Juju Bigtop charms](https://jujucharms.com/q/apache/bigtop) +- [Juju mailing list](https://lists.ubuntu.com/mailman/listinfo/juju) +- [Juju community](https://jujucharms.com/community) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml new file mode 100644 index 000000000..42cd65e9a --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml @@ -0,0 +1,131 @@ +services: + namenode: + charm: "cs:~bigdata-dev/xenial/hadoop-namenode" + constraints: "mem=7G root-disk=32G" + num_units: 1 + annotations: + gui-x: "500" + gui-y: "800" + to: + - "0" + resourcemanager: + charm: "cs:~bigdata-dev/xenial/hadoop-resourcemanager" + constraints: "mem=7G root-disk=32G" + num_units: 1 + annotations: + gui-x: "500" + gui-y: "0" + to: + - "0" + slave: + charm: "cs:~bigdata-dev/xenial/hadoop-slave" + constraints: "mem=7G root-disk=32G" + num_units: 3 + annotations: + gui-x: "0" + gui-y: "400" + to: + - "1" + - "2" + - "3" + plugin: + charm: "cs:~bigdata-dev/xenial/hadoop-plugin" + annotations: + gui-x: "1000" + gui-y: "400" + client: + charm: "cs:xenial/hadoop-client-3" + constraints: "mem=3G root-disk=32G" + num_units: 1 + annotations: + gui-x: "1250" + gui-y: "400" + to: + - "4" + hbase: + charm: "cs:~bigdata-dev/xenial/hbase" + constraints: "mem=7G root-disk=32G" + num_units: 3 + annotations: + gui-x: "1000" + gui-y: "0" + to: + - "1" + - "2" + - "3" + zookeeper: + charm: "cs:xenial/zookeeper-10" + constraints: "mem=3G root-disk=32G" + num_units: 3 + annotations: + gui-x: "500" + gui-y: "400" + to: + - "5" + - "6" + - "7" + ganglia: + charm: "cs:~bigdata-dev/xenial/ganglia-5" + num_units: 
1 + annotations: + gui-x: "0" + gui-y: "800" + to: + - "4" + ganglia-node: + charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + annotations: + gui-x: "250" + gui-y: "400" + rsyslog: + charm: "cs:~bigdata-dev/xenial/rsyslog-7" + num_units: 1 + annotations: + gui-x: "1000" + gui-y: "800" + to: + - "4" + rsyslog-forwarder-ha: + charm: "cs:~bigdata-dev/xenial/rsyslog-forwarder-ha-7" + annotations: + gui-x: "750" + gui-y: "400" +series: xenial +relations: + - [resourcemanager, namenode] + - [namenode, slave] + - [resourcemanager, slave] + - [plugin, namenode] + - [plugin, resourcemanager] + - [client, plugin] + - [hbase, plugin] + - [hbase, zookeeper] + - ["ganglia-node:juju-info", "namenode:juju-info"] + - ["ganglia-node:juju-info", "resourcemanager:juju-info"] + - ["ganglia-node:juju-info", "slave:juju-info"] + - ["ganglia-node:juju-info", "hbase:juju-info"] + - ["ganglia-node:juju-info", "zookeeper:juju-info"] + - ["ganglia:node", "ganglia-node:node"] + - ["rsyslog-forwarder-ha:juju-info", "namenode:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "resourcemanager:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "slave:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "hbase:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "zookeeper:juju-info"] + - ["rsyslog:aggregator", "rsyslog-forwarder-ha:syslog"] +machines: + "0": + series: "xenial" + "1": + series: "xenial" + "2": + series: "xenial" + "3": + series: "xenial" + "4": + series: "xenial" + "5": + series: "xenial" + "6": + series: "xenial" + "7": + series: "xenial" diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml new file mode 100644 index 000000000..bcc1089d1 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml @@ -0,0 +1,131 @@ +services: + namenode: + charm: "/home/ubuntu/charms/xenial/hadoop-namenode" + constraints: "mem=7G root-disk=32G" + num_units: 1 + annotations: + gui-x: "500" + gui-y: "800" + to: + - "0" + 
resourcemanager: + charm: "/home/ubuntu/charms/xenial/hadoop-resourcemanager" + constraints: "mem=7G root-disk=32G" + num_units: 1 + annotations: + gui-x: "500" + gui-y: "0" + to: + - "0" + slave: + charm: "/home/ubuntu/charms/xenial/hadoop-slave" + constraints: "mem=7G root-disk=32G" + num_units: 3 + annotations: + gui-x: "0" + gui-y: "400" + to: + - "1" + - "2" + - "3" + plugin: + charm: "/home/ubuntu/charms/xenial/hadoop-plugin" + annotations: + gui-x: "1000" + gui-y: "400" + client: + charm: "cs:xenial/hadoop-client-3" + constraints: "mem=3G root-disk=32G" + num_units: 1 + annotations: + gui-x: "1250" + gui-y: "400" + to: + - "4" + hbase: + charm: "/home/ubuntu/charms/xenial/hbase" + constraints: "mem=7G root-disk=32G" + num_units: 3 + annotations: + gui-x: "1000" + gui-y: "0" + to: + - "1" + - "2" + - "3" + zookeeper: + charm: "cs:xenial/zookeeper-10" + constraints: "mem=3G root-disk=32G" + num_units: 3 + annotations: + gui-x: "500" + gui-y: "400" + to: + - "5" + - "6" + - "7" + ganglia: + charm: "cs:~bigdata-dev/xenial/ganglia-5" + num_units: 1 + annotations: + gui-x: "0" + gui-y: "800" + to: + - "4" + ganglia-node: + charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + annotations: + gui-x: "250" + gui-y: "400" + rsyslog: + charm: "cs:~bigdata-dev/xenial/rsyslog-7" + num_units: 1 + annotations: + gui-x: "1000" + gui-y: "800" + to: + - "4" + rsyslog-forwarder-ha: + charm: "cs:~bigdata-dev/xenial/rsyslog-forwarder-ha-7" + annotations: + gui-x: "750" + gui-y: "400" +series: xenial +relations: + - [resourcemanager, namenode] + - [namenode, slave] + - [resourcemanager, slave] + - [plugin, namenode] + - [plugin, resourcemanager] + - [client, plugin] + - [hbase, plugin] + - [hbase, zookeeper] + - ["ganglia-node:juju-info", "namenode:juju-info"] + - ["ganglia-node:juju-info", "resourcemanager:juju-info"] + - ["ganglia-node:juju-info", "slave:juju-info"] + - ["ganglia-node:juju-info", "hbase:juju-info"] + - ["ganglia-node:juju-info", "zookeeper:juju-info"] + - 
["ganglia:node", "ganglia-node:node"] + - ["rsyslog-forwarder-ha:juju-info", "namenode:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "resourcemanager:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "slave:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "hbase:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "zookeeper:juju-info"] + - ["rsyslog:aggregator", "rsyslog-forwarder-ha:syslog"] +machines: + "0": + series: "xenial" + "1": + series: "xenial" + "2": + series: "xenial" + "3": + series: "xenial" + "4": + series: "xenial" + "5": + series: "xenial" + "6": + series: "xenial" + "7": + series: "xenial" diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml new file mode 100644 index 000000000..31eb365c7 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -0,0 +1,131 @@ +services: + namenode: + charm: "cs:xenial/hadoop-namenode-8" + constraints: "mem=7G root-disk=32G" + num_units: 1 + annotations: + gui-x: "500" + gui-y: "800" + to: + - "0" + resourcemanager: + charm: "cs:xenial/hadoop-resourcemanager-8" + constraints: "mem=7G root-disk=32G" + num_units: 1 + annotations: + gui-x: "500" + gui-y: "0" + to: + - "0" + slave: + charm: "cs:xenial/hadoop-slave-8" + constraints: "mem=7G root-disk=32G" + num_units: 3 + annotations: + gui-x: "0" + gui-y: "400" + to: + - "1" + - "2" + - "3" + plugin: + charm: "cs:xenial/hadoop-plugin-8" + annotations: + gui-x: "1000" + gui-y: "400" + client: + charm: "cs:xenial/hadoop-client-3" + constraints: "mem=3G root-disk=32G" + num_units: 1 + annotations: + gui-x: "1250" + gui-y: "400" + to: + - "4" + hbase: + charm: "cs:xenial/hbase-2" + constraints: "mem=7G root-disk=32G" + num_units: 3 + annotations: + gui-x: "1000" + gui-y: "0" + to: + - "1" + - "2" + - "3" + zookeeper: + charm: "cs:xenial/zookeeper-10" + constraints: "mem=3G root-disk=32G" + num_units: 3 + annotations: + gui-x: "500" + gui-y: "400" + to: + - "5" + - "6" + - "7" + ganglia: + charm: 
"cs:~bigdata-dev/xenial/ganglia-5" + num_units: 1 + annotations: + gui-x: "0" + gui-y: "800" + to: + - "4" + ganglia-node: + charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + annotations: + gui-x: "250" + gui-y: "400" + rsyslog: + charm: "cs:~bigdata-dev/xenial/rsyslog-7" + num_units: 1 + annotations: + gui-x: "1000" + gui-y: "800" + to: + - "4" + rsyslog-forwarder-ha: + charm: "cs:~bigdata-dev/xenial/rsyslog-forwarder-ha-7" + annotations: + gui-x: "750" + gui-y: "400" +series: xenial +relations: + - [resourcemanager, namenode] + - [namenode, slave] + - [resourcemanager, slave] + - [plugin, namenode] + - [plugin, resourcemanager] + - [client, plugin] + - [hbase, plugin] + - [hbase, zookeeper] + - ["ganglia-node:juju-info", "namenode:juju-info"] + - ["ganglia-node:juju-info", "resourcemanager:juju-info"] + - ["ganglia-node:juju-info", "slave:juju-info"] + - ["ganglia-node:juju-info", "hbase:juju-info"] + - ["ganglia-node:juju-info", "zookeeper:juju-info"] + - ["ganglia:node", "ganglia-node:node"] + - ["rsyslog-forwarder-ha:juju-info", "namenode:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "resourcemanager:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "slave:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "hbase:juju-info"] + - ["rsyslog-forwarder-ha:juju-info", "zookeeper:juju-info"] + - ["rsyslog:aggregator", "rsyslog-forwarder-ha:syslog"] +machines: + "0": + series: "xenial" + "1": + series: "xenial" + "2": + series: "xenial" + "3": + series: "xenial" + "4": + series: "xenial" + "5": + series: "xenial" + "6": + series: "xenial" + "7": + series: "xenial" diff --git a/bigtop-deploy/juju/hadoop-hbase/copyright b/bigtop-deploy/juju/hadoop-hbase/copyright new file mode 100644 index 000000000..e900b97c4 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/copyright @@ -0,0 +1,16 @@ +Format: http://dep.debian.net/deps/dep5/ + +Files: * +Copyright: Copyright 2015, Canonical Ltd., All Rights Reserved. 
+License: Apache License 2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + . + http://www.apache.org/licenses/LICENSE-2.0 + . + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py new file mode 100755 index 000000000..96a238366 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python3 + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import amulet +import os +import re +import unittest +import yaml + + +class TestBundle(unittest.TestCase): + bundle_file = os.path.join(os.path.dirname(__file__), '..', 'bundle.yaml') + + @classmethod + def setUpClass(cls): + # classmethod inheritance doesn't work quite right with + # setUpClass / tearDownClass, so subclasses have to manually call this + cls.d = amulet.Deployment(series='xenial') + with open(cls.bundle_file) as f: + bun = f.read() + bundle = yaml.safe_load(bun) + + cls.d.load(bundle) + cls.d.setup(timeout=3600) + + # we need units reporting ready before we attempt our smoke tests + cls.d.sentry.wait_for_messages({'client': re.compile('ready'), + 'namenode': re.compile('ready'), + 'resourcemanager': re.compile('ready'), + 'slave': re.compile('ready'), + 'hbase': re.compile('ready'), + }, timeout=3600) + cls.hdfs = cls.d.sentry['namenode'][0] + cls.yarn = cls.d.sentry['resourcemanager'][0] + cls.slave = cls.d.sentry['slave'][0] + cls.hbase = cls.d.sentry['hbase'][0] + + def test_components(self): + """ + Confirm that all of the required components are up and running. 
+ """ + hdfs, retcode = self.hdfs.run("pgrep -a java") + yarn, retcode = self.yarn.run("pgrep -a java") + slave, retcode = self.slave.run("pgrep -a java") + hbase, retcode = self.hbase.run("pgrep -a java") + + assert 'NameNode' in hdfs, "NameNode not started" + assert 'NameNode' not in slave, "NameNode should not be running on slave" + + assert 'ResourceManager' in yarn, "ResourceManager not started" + assert 'ResourceManager' not in slave, "ResourceManager should not be running on slave" + + assert 'JobHistoryServer' in yarn, "JobHistoryServer not started" + assert 'JobHistoryServer' not in slave, "JobHistoryServer should not be running on slave" + + assert 'NodeManager' in slave, "NodeManager not started" + assert 'NodeManager' not in yarn, "NodeManager should not be running on resourcemanager" + assert 'NodeManager' not in hdfs, "NodeManager should not be running on namenode" + + assert 'DataNode' in slave, "DataNode not started" + assert 'DataNode' not in yarn, "DataNode should not be running on resourcemanager" + assert 'DataNode' not in hdfs, "DataNode should not be running on namenode" + + assert 'Master' in hbase, "HBase Master not started" + + def test_hdfs(self): + """ + Validates mkdir, ls, chmod, and rm HDFS operations. + """ + uuid = self.hdfs.run_action('smoke-test') + result = self.d.action_fetch(uuid, timeout=600, full_output=True) + # action status=completed on success + if (result['status'] != "completed"): + self.fail('HDFS smoke-test did not complete: %s' % result) + + def test_yarn(self): + """ + Validates YARN using the Bigtop 'yarn' smoke test. + """ + uuid = self.yarn.run_action('smoke-test') + # 'yarn' smoke takes a while (bigtop tests download lots of stuff) + result = self.d.action_fetch(uuid, timeout=1800, full_output=True) + # action status=completed on success + if (result['status'] != "completed"): + self.fail('YARN smoke-test did not complete: %s' % result) + + def test_hbase(self): + """ + Validates HBase with a simple smoke test. 
+ """ + uuid = self.hbase.run_action('smoke-test') + result = self.d.action_fetch(uuid, timeout=600, full_output=True) + # action status=completed on success + if (result['status'] != "completed"): + self.fail('HBase smoke-test did not complete: %s' % result) + + @unittest.skip( + 'Skipping slave smoke tests; they are too inconsistent and long running for CWR.') + def test_slave(self): + """ + Validates slave using the Bigtop 'hdfs' and 'mapred' smoke test. + """ + uuid = self.slave.run_action('smoke-test') + # 'hdfs+mapred' smoke takes a long while (bigtop tests are slow) + result = self.d.action_fetch(uuid, timeout=3600, full_output=True) + # action status=completed on success + if (result['status'] != "completed"): + self.fail('Slave smoke-test did not complete: %s' % result) + + +if __name__ == '__main__': + unittest.main() diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml new file mode 100644 index 000000000..c9325b05e --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml @@ -0,0 +1,7 @@ +reset: false +deployment_timeout: 7200 +sources: + - 'ppa:juju/stable' +packages: + - amulet + - python3-yaml From 039a61858cdfa754324efea22735d608eee04c9c Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sun, 19 Mar 2017 14:50:35 +0000 Subject: [PATCH 03/15] wrong rev for hbase --- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index 31eb365c7..714bdd320 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -43,7 +43,7 @@ services: to: - "4" hbase: - charm: "cs:xenial/hbase-2" + charm: "cs:xenial/hbase-9" constraints: "mem=7G root-disk=32G" num_units: 3 annotations: From da7e3cb845ad0135d1c251a236d62aa76bf8044b Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Wed, 22 Mar 2017 05:38:10 
+0000 Subject: [PATCH 04/15] charm rev bump --- bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml | 6 +++--- bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml | 6 +++--- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 14 +++++++------- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml index 42cd65e9a..2298cbb07 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml @@ -35,7 +35,7 @@ services: gui-y: "400" client: charm: "cs:xenial/hadoop-client-3" - constraints: "mem=3G root-disk=32G" + constraints: "mem=3G" num_units: 1 annotations: gui-x: "1250" @@ -47,14 +47,14 @@ services: constraints: "mem=7G root-disk=32G" num_units: 3 annotations: - gui-x: "1000" + gui-x: "0" gui-y: "0" to: - "1" - "2" - "3" zookeeper: - charm: "cs:xenial/zookeeper-10" + charm: "cs:~bigdata-dev/xenial/zookeeper" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml index bcc1089d1..473b5890b 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml @@ -35,7 +35,7 @@ services: gui-y: "400" client: charm: "cs:xenial/hadoop-client-3" - constraints: "mem=3G root-disk=32G" + constraints: "mem=3G" num_units: 1 annotations: gui-x: "1250" @@ -47,14 +47,14 @@ services: constraints: "mem=7G root-disk=32G" num_units: 3 annotations: - gui-x: "1000" + gui-x: "0" gui-y: "0" to: - "1" - "2" - "3" zookeeper: - charm: "cs:xenial/zookeeper-10" + charm: "/home/ubuntu/charms/xenial/zookeeper" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index 714bdd320..dacec007a 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ 
b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -1,6 +1,6 @@ services: namenode: - charm: "cs:xenial/hadoop-namenode-8" + charm: "cs:xenial/hadoop-namenode-11" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-8" + charm: "cs:xenial/hadoop-resourcemanager-11" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -18,7 +18,7 @@ services: to: - "0" slave: - charm: "cs:xenial/hadoop-slave-8" + charm: "cs:xenial/hadoop-slave-11" constraints: "mem=7G root-disk=32G" num_units: 3 annotations: @@ -29,13 +29,13 @@ services: - "2" - "3" plugin: - charm: "cs:xenial/hadoop-plugin-8" + charm: "cs:xenial/hadoop-plugin-11" annotations: gui-x: "1000" gui-y: "400" client: charm: "cs:xenial/hadoop-client-3" - constraints: "mem=3G root-disk=32G" + constraints: "mem=3G" num_units: 1 annotations: gui-x: "1250" @@ -47,14 +47,14 @@ services: constraints: "mem=7G root-disk=32G" num_units: 3 annotations: - gui-x: "1000" + gui-x: "0" gui-y: "0" to: - "1" - "2" - "3" zookeeper: - charm: "cs:xenial/zookeeper-10" + charm: "cs:xenial/zookeeper-12" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: From 63453c5d7ffbde8ec4da0bdbfdab00732424bff9 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Fri, 24 Mar 2017 03:53:07 +0000 Subject: [PATCH 05/15] charm updates from recent Juju CI - hbase should include quorum interface - kafka/mahout/spark/zk tests needed robustification --- .../src/charm/hbase/layer-hbase/layer.yaml | 1 + .../src/charm/hbase/layer-hbase/metadata.yaml | 3 ++ .../charm/hbase/layer-hbase/reactive/hbase.py | 5 +- .../layer-kafka/tests/10-config-changed.py | 18 ++++--- .../mahout/layer-mahout/actions/smoke-test | 53 ++++++++++++++----- .../mahout/layer-mahout/reactive/mahout.py | 7 ++- .../layer-spark/tests/03-scale-standalone.py | 16 ++++-- .../spark/layer-spark/tests/10-test-ha.py | 16 ++++-- .../layer-zookeeper/tests/01-deploy-smoke.py | 9 ++-- 
.../layer-zookeeper/tests/10-bind-address.py | 5 +- 10 files changed, 96 insertions(+), 37 deletions(-) diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml b/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml index ebebc5676..a35e252a8 100644 --- a/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml +++ b/bigtop-packages/src/charm/hbase/layer-hbase/layer.yaml @@ -5,6 +5,7 @@ includes: - 'interface:zookeeper' - 'interface:benchmark' - 'interface:hbase' + - 'interface:hbase-quorum' options: apache-bigtop-base: ports: diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml b/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml index 344e6397e..821f3fa8e 100644 --- a/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml +++ b/bigtop-packages/src/charm/hbase/layer-hbase/metadata.yaml @@ -13,3 +13,6 @@ provides: interface: hbase benchmark: interface: benchmark +peers: + hbpeer: + interface: hbase-quorum diff --git a/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py b/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py index a7a1eb4a3..26751b597 100644 --- a/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py +++ b/bigtop-packages/src/charm/hbase/layer-hbase/reactive/hbase.py @@ -45,9 +45,10 @@ def report_status(): @when('bigtop.available', 'zookeeper.ready', 'hadoop.hdfs.ready') -def installing_hbase(zk, hdfs): +def install_hbase(zk, hdfs): zks = zk.zookeepers() - if is_state('hbase.installed') and (not data_changed('zks', zks)): + if (is_state('hbase.installed') and + (not data_changed('zks', zks))): return msg = "configuring hbase" if is_state('hbase.installed') else "installing hbase" diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py index 4fd44ceb8..27e875369 100755 --- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py +++ 
b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py @@ -29,14 +29,18 @@ class TestConfigChanged(unittest.TestCase): @classmethod def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('kafka', charm='kafka') - cls.d.add('zookeeper', charm='cs:xenial/zookeeper') + cls.d.add('kafka-test', charm='kafka') + cls.d.add('zk-test', charm='cs:xenial/zookeeper') - cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper') + cls.d.relate('kafka-test:zookeeper', 'zk-test:zookeeper') cls.d.setup(timeout=1800) - cls.d.sentry.wait_for_messages({'kafka': 'ready'}, timeout=1800) - cls.unit = cls.d.sentry['kafka'][0] + cls.d.sentry.wait_for_messages({'kafka-test': 'ready'}, timeout=1800) + cls.unit = cls.d.sentry['kafka-test'][0] + + @classmethod + def tearDownClass(cls): + cls.d.remove_service('zk-test', 'kafka-test') def test_bind_network_interface(self): """ @@ -59,7 +63,7 @@ def test_bind_network_interface(self): raise Exception( "Could not find any interface on the unit that matched my " "criteria.") - self.d.configure('kafka', {'network_interface': network_interface}) + self.d.configure('kafka-test', {'network_interface': network_interface}) # NB: we used to watch for a maintenance status message, but every now # and then, we'd miss it. Wait 2m to let the config-changed hook settle. @@ -85,7 +89,7 @@ def test_reset_network_interface(self): """ Verify that we can reset the client port bindings to 0.0.0.0 """ - self.d.configure('kafka', {'network_interface': '0.0.0.0'}) + self.d.configure('kafka-test', {'network_interface': '0.0.0.0'}) # NB: we used to watch for a maintenance status message, but every now # and then, we'd miss it. Wait 2m to let the config-changed hook settle. 
diff --git a/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test b/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test index 22cf7c713..cc98b4615 100755 --- a/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test +++ b/bigtop-packages/src/charm/mahout/layer-mahout/actions/smoke-test @@ -17,19 +17,48 @@ set -ex -if hdfs dfs -stat /tmp/input/ &> /dev/null; then - hdfs dfs -rm -r -skipTrash /tmp/input/ || true +if ! charms.reactive is_state 'mahout.installed'; then + action-fail 'Mahout is not yet ready' + exit fi -hdfs dfs -mkdir /tmp/input/ -hdfs dfs -put resources/links-converted.txt /tmp/input/ -hdfs dfs -put resources/users.txt /tmp/input/ +# create dir to store results +RUN=`date +%s` +RESULT_DIR=/opt/mahout-smoke-results +RESULT_LOG=${RESULT_DIR}/${RUN}.log +mkdir -p ${RESULT_DIR} +chown -R ubuntu:ubuntu ${RESULT_DIR} -if hdfs dfs -stat temp &> /dev/null; then - hdfs dfs -rm -r -skipTrash temp || true -fi -if hdfs dfs -stat output &> /dev/null; then - hdfs dfs -rm -r -skipTrash output || true -fi +# hdfs dirs +MAHOUT_SMOKE="/tmp/mahout-smoke" +MAHOUT_INPUT="${MAHOUT_SMOKE}/input" +MAHOUT_OUTPUT="${MAHOUT_SMOKE}/output" + +# remove any previous smoke test run. must be run as ubuntu, since that user +# owns the hdfs space +su - ubuntu -c "hadoop fs -rm -f -r -skipTrash ${MAHOUT_SMOKE}" +su - ubuntu -c "hadoop fs -rm -f -r -skipTrash temp" + +echo 'running mahout smoke-test as the ubuntu user' +# NB: Escaped envars in the block below (e.g., \${CHARM_DIR}) come from +# the environment while non-escaped vars (e.g., ${MAHOUT_INPUT}) come from +# this outer scope. +su ubuntu << EOF +set -x +. /etc/default/hadoop +. 
/etc/environment + +# setup our smoke test input +hdfs dfs -mkdir -p ${MAHOUT_INPUT} +hdfs dfs -put \${CHARM_DIR}/resources/links-converted.txt ${MAHOUT_INPUT} +hdfs dfs -put \${CHARM_DIR}/resources/users.txt ${MAHOUT_INPUT} -hadoop jar /usr/lib/mahout/mahout-mr-*-job.jar org.apache.mahout.cf.taste.hadoop.item.RecommenderJob -Dmapred.input.dir=/tmp/input/links-converted.txt -Dmapred.output.dir=output --usersFile /tmp/input/users.txt --booleanData -s SIMILARITY_LOGLIKELIHOOD +hadoop jar /usr/lib/mahout/mahout-mr-*-job.jar \ + org.apache.mahout.cf.taste.hadoop.item.RecommenderJob \ + -Dmapreduce.input.fileinputformat.inputdir=${MAHOUT_INPUT}/links-converted.txt \ + -Dmapred.output.dir=${MAHOUT_OUTPUT} \ + --usersFile ${MAHOUT_INPUT}/users.txt \ + --booleanData \ + -s SIMILARITY_LOGLIKELIHOOD 2>&1 | tee -a ${RESULT_LOG} +EOF +echo 'mahout smoke-test complete' diff --git a/bigtop-packages/src/charm/mahout/layer-mahout/reactive/mahout.py b/bigtop-packages/src/charm/mahout/layer-mahout/reactive/mahout.py index e8e68cac3..d6f3b4b2e 100644 --- a/bigtop-packages/src/charm/mahout/layer-mahout/reactive/mahout.py +++ b/bigtop-packages/src/charm/mahout/layer-mahout/reactive/mahout.py @@ -15,7 +15,7 @@ from jujubigdata import utils from charms.reactive import when, when_not, set_state -from charms.layer.apache_bigtop_base import Bigtop +from charms.layer.apache_bigtop_base import Bigtop, get_package_version from charmhelpers.core import hookenv @@ -33,5 +33,8 @@ def install_mahout(): with utils.environment_edit_in_place('/etc/environment') as env: env['MAHOUT_HOME'] = '/usr/lib/mahout' - hookenv.status_set('active', 'ready') set_state('mahout.installed') + hookenv.status_set('active', 'ready') + # set app version string for juju status output + mahout_version = get_package_version('mahout') or 'unknown' + hookenv.application_version_set(mahout_version) diff --git a/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py 
b/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py index 7d8f4cd13..4392fac8e 100755 --- a/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py +++ b/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py @@ -31,10 +31,18 @@ def setUpClass(cls): cls.d.setup(timeout=3600) cls.d.sentry.wait(timeout=3600) - # Disable tearDown until amulet supports it - # @classmethod - # def tearDownClass(cls): - # cls.d.remove_service('spark-test-scale') + @classmethod + def tearDownClass(cls): + try: + cls.d.remove_service('spark-test-scale') + except OSError as e: + # NB: it looks like remove_service complains if it cannot tear down + # all the units that we spun up in setupClass. Since we manually + # kill units as part of the tests below, allow remove-application + # to fail with an OSError. Pay attention here (kwmonroe) in + # case this needs to be reported as an amulet issue. + print("Amulet remove-service returned: {}".format(e.errno)) + pass def test_scaleup(self): """ diff --git a/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py b/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py index f8a09a6f0..b1aa1e6dd 100755 --- a/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py +++ b/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py @@ -35,10 +35,18 @@ def setUpClass(cls): cls.d.setup(timeout=3600) cls.d.sentry.wait(timeout=3600) - # Disable tearDown until amulet supports it - # @classmethod - # def tearDownClass(cls): - # cls.d.remove_service('spark-test-ha') + @classmethod + def tearDownClass(cls): + try: + cls.d.remove_service('zk-test', 'spark-test-ha') + except OSError as e: + # NB: it looks like remove_service complains if it cannot tear down + # all the units that we spun up in setupClass. Since we manually + # kill units as part of the tests below, allow remove-application + # to fail with an OSError. 
Pay attention here (kwmonroe) in + # case this needs to be reported as an amulet issue. + print("Amulet remove-service returned: {}".format(e.errno)) + pass def test_master_selected(self): """ diff --git a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py index 1ef64d859..feb45f2a2 100755 --- a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py +++ b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py @@ -16,10 +16,9 @@ # limitations under the License. import amulet +import re import unittest -TIMEOUT = 1800 - class TestDeploy(unittest.TestCase): """ @@ -32,9 +31,9 @@ def setUpClass(cls): cls.d.add('zookeeper', charm='zookeeper', units=3) - cls.d.setup(timeout=TIMEOUT) - cls.d.sentry.wait_for_messages({'zookeeper': 'ready (3 units)'}, - timeout=TIMEOUT) + cls.d.setup(timeout=1800) + cls.d.sentry.wait_for_messages({'zookeeper': re.compile('ready')}, + timeout=1800) cls.unit = cls.d.sentry['zookeeper'][0] def test_deploy(self): diff --git a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py index 8fa2bb7bc..9e6068651 100755 --- a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py +++ b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py @@ -28,7 +28,6 @@ class TestBindClientPort(unittest.TestCase): Test to verify that we can bind to listen for client connections on a specific interface. """ - @classmethod def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') @@ -40,6 +39,10 @@ def setUpClass(cls): timeout=TIMEOUT) cls.unit = cls.d.sentry['zk-test'][0] + @classmethod + def tearDownClass(cls): + cls.d.remove_service('zk-test') + def test_bind_port(self): """ Verify that we update client port bindings successfully. 
From 8152bd2eb174439123fc2fc83bcd7c93f48b15c7 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Fri, 24 Mar 2017 03:54:44 +0000 Subject: [PATCH 06/15] juju bundle refresh to pick up latest charm revs and exclude tests for non-bigtop components --- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 12 ++++++------ bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml | 6 ++++++ bigtop-deploy/juju/hadoop-kafka/bundle.yaml | 12 ++++++------ bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml | 6 ++++++ bigtop-deploy/juju/hadoop-processing/bundle.yaml | 8 ++++---- .../juju/hadoop-processing/tests/tests.yaml | 6 ++++++ bigtop-deploy/juju/hadoop-spark/bundle.yaml | 12 ++++++------ bigtop-deploy/juju/hadoop-spark/tests/tests.yaml | 6 ++++++ bigtop-deploy/juju/spark-processing/bundle.yaml | 4 ++-- bigtop-deploy/juju/spark-processing/tests/tests.yaml | 12 ++++++++++++ 10 files changed, 60 insertions(+), 24 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index dacec007a..01608c4ef 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -1,6 +1,6 @@ services: namenode: - charm: "cs:xenial/hadoop-namenode-11" + charm: "cs:xenial/hadoop-namenode-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-11" + charm: "cs:xenial/hadoop-resourcemanager-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -18,7 +18,7 @@ services: to: - "0" slave: - charm: "cs:xenial/hadoop-slave-11" + charm: "cs:xenial/hadoop-slave-12" constraints: "mem=7G root-disk=32G" num_units: 3 annotations: @@ -29,7 +29,7 @@ services: - "2" - "3" plugin: - charm: "cs:xenial/hadoop-plugin-11" + charm: "cs:xenial/hadoop-plugin-12" annotations: gui-x: "1000" gui-y: "400" @@ -43,7 +43,7 @@ services: to: - "4" hbase: - charm: "cs:xenial/hbase-9" + charm: "cs:xenial/hbase-10" constraints: 
"mem=7G root-disk=32G" num_units: 3 annotations: @@ -54,7 +54,7 @@ services: - "2" - "3" zookeeper: - charm: "cs:xenial/zookeeper-12" + charm: "cs:xenial/zookeeper-14" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml index c9325b05e..28516a6ff 100644 --- a/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml @@ -5,3 +5,9 @@ sources: packages: - amulet - python3-yaml +# exclude tests that are unrelated to bigtop. +excludes: + - ganglia + - ganglia-node + - rsyslog + - rsyslog-forwarder-ha diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml index d4cb91f69..c92103009 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml @@ -1,6 +1,6 @@ services: namenode: - charm: "cs:xenial/hadoop-namenode-11" + charm: "cs:xenial/hadoop-namenode-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-11" + charm: "cs:xenial/hadoop-resourcemanager-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -18,7 +18,7 @@ services: to: - "0" slave: - charm: "cs:xenial/hadoop-slave-11" + charm: "cs:xenial/hadoop-slave-12" constraints: "mem=7G root-disk=32G" num_units: 3 annotations: @@ -29,7 +29,7 @@ services: - "2" - "3" plugin: - charm: "cs:xenial/hadoop-plugin-11" + charm: "cs:xenial/hadoop-plugin-12" annotations: gui-x: "1000" gui-y: "400" @@ -52,7 +52,7 @@ services: to: - "4" zookeeper: - charm: "cs:xenial/zookeeper-12" + charm: "cs:xenial/zookeeper-14" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: @@ -63,7 +63,7 @@ services: - "6" - "7" kafka: - charm: "cs:xenial/kafka-7" + charm: "cs:xenial/kafka-9" constraints: "mem=3G" num_units: 1 annotations: diff --git 
a/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml b/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml index 84f78d7d3..b95174212 100644 --- a/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml @@ -5,3 +5,9 @@ sources: packages: - amulet - python3-yaml +# exclude tests that are unrelated to bigtop. +excludes: + - ganglia + - ganglia-node + - rsyslog + - rsyslog-forwarder-ha diff --git a/bigtop-deploy/juju/hadoop-processing/bundle.yaml b/bigtop-deploy/juju/hadoop-processing/bundle.yaml index c4c6ad66e..90df10c6d 100644 --- a/bigtop-deploy/juju/hadoop-processing/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-processing/bundle.yaml @@ -1,6 +1,6 @@ services: namenode: - charm: "cs:xenial/hadoop-namenode-11" + charm: "cs:xenial/hadoop-namenode-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-11" + charm: "cs:xenial/hadoop-resourcemanager-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -18,7 +18,7 @@ services: to: - "0" slave: - charm: "cs:xenial/hadoop-slave-11" + charm: "cs:xenial/hadoop-slave-12" constraints: "mem=7G root-disk=32G" num_units: 3 annotations: @@ -29,7 +29,7 @@ services: - "2" - "3" plugin: - charm: "cs:xenial/hadoop-plugin-11" + charm: "cs:xenial/hadoop-plugin-12" annotations: gui-x: "1000" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml b/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml index 84f78d7d3..b95174212 100644 --- a/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml @@ -5,3 +5,9 @@ sources: packages: - amulet - python3-yaml +# exclude tests that are unrelated to bigtop. 
+excludes: + - ganglia + - ganglia-node + - rsyslog + - rsyslog-forwarder-ha diff --git a/bigtop-deploy/juju/hadoop-spark/bundle.yaml b/bigtop-deploy/juju/hadoop-spark/bundle.yaml index 6346c013d..3eb1b4e87 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-spark/bundle.yaml @@ -1,6 +1,6 @@ services: namenode: - charm: "cs:xenial/hadoop-namenode-11" + charm: "cs:xenial/hadoop-namenode-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-11" + charm: "cs:xenial/hadoop-resourcemanager-12" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: @@ -18,7 +18,7 @@ services: to: - "0" slave: - charm: "cs:xenial/hadoop-slave-11" + charm: "cs:xenial/hadoop-slave-12" constraints: "mem=7G root-disk=32G" num_units: 3 annotations: @@ -29,7 +29,7 @@ services: - "2" - "3" plugin: - charm: "cs:xenial/hadoop-plugin-11" + charm: "cs:xenial/hadoop-plugin-12" annotations: gui-x: "1000" gui-y: "400" @@ -43,7 +43,7 @@ services: to: - "4" spark: - charm: "cs:xenial/spark-19" + charm: "cs:xenial/spark-23" constraints: "mem=7G root-disk=32G" num_units: 1 options: @@ -54,7 +54,7 @@ services: to: - "5" zookeeper: - charm: "cs:xenial/zookeeper-12" + charm: "cs:xenial/zookeeper-14" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml b/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml index 84f78d7d3..b95174212 100644 --- a/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml @@ -5,3 +5,9 @@ sources: packages: - amulet - python3-yaml +# exclude tests that are unrelated to bigtop. 
+excludes: + - ganglia + - ganglia-node + - rsyslog + - rsyslog-forwarder-ha diff --git a/bigtop-deploy/juju/spark-processing/bundle.yaml b/bigtop-deploy/juju/spark-processing/bundle.yaml index 0a37882f2..e467321a1 100644 --- a/bigtop-deploy/juju/spark-processing/bundle.yaml +++ b/bigtop-deploy/juju/spark-processing/bundle.yaml @@ -1,6 +1,6 @@ services: spark: - charm: "cs:xenial/spark-19" + charm: "cs:xenial/spark-23" constraints: "mem=7G root-disk=32G" num_units: 2 annotations: @@ -10,7 +10,7 @@ services: - "0" - "1" zookeeper: - charm: "cs:xenial/zookeeper-12" + charm: "cs:xenial/zookeeper-14" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/spark-processing/tests/tests.yaml b/bigtop-deploy/juju/spark-processing/tests/tests.yaml index 84f78d7d3..e4b472ea4 100644 --- a/bigtop-deploy/juju/spark-processing/tests/tests.yaml +++ b/bigtop-deploy/juju/spark-processing/tests/tests.yaml @@ -5,3 +5,15 @@ sources: packages: - amulet - python3-yaml +# exclude tests that are unrelated to bigtop. the exclusion of spark might +# look weird here, but for this bundle, we only care that spark is good in +# HA mode (covered by this bundle when we invoke the spark smoke-test). the +# typical spark tests will test spark once in standalone and twice more in +# various HA modes. that takes forever, so leave those heavy tests for the +# hadoop-spark bundle. let's go fast on this one. 
+excludes: + - ganglia + - ganglia-node + - rsyslog + - rsyslog-forwarder-ha + - spark From c06e6ed5f1db9771ac7f857370a4ded7c1098862 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 02:20:02 +0000 Subject: [PATCH 07/15] strip --to placement back out of bundle tests; doesnt seem to work --- bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py | 12 ++++++++++++ bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py | 12 ++++++++++++ .../juju/hadoop-processing/tests/01-bundle.py | 12 ++++++++++++ bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py | 12 ++++++++++++ .../juju/spark-processing/tests/01-bundle.py | 13 +++++++++++++ 5 files changed, 61 insertions(+) diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py index 96a238366..854fce7bf 100755 --- a/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py @@ -34,6 +34,18 @@ def setUpClass(cls): bun = f.read() bundle = yaml.safe_load(bun) + # NB: strip machine ('to') placement. We don't seem to be guaranteed + # the same machine numbering after the initial bundletester deployment, + # so we might fail when redeploying --to a specific machine to run + # these bundle tests. This is ok because all charms in this bundle are + # using 'reset: false', so we'll already have our deployment just the + # way we want it by the time this test runs. 
This was originally + # raised as: + # https://github.com/juju/amulet/issues/148 + for service, service_config in bundle['services'].items(): + if 'to' in service_config: + del service_config['to'] + cls.d.load(bundle) cls.d.setup(timeout=3600) diff --git a/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py index ee3536971..34f974363 100755 --- a/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py @@ -34,6 +34,18 @@ def setUpClass(cls): bun = f.read() bundle = yaml.safe_load(bun) + # NB: strip machine ('to') placement. We don't seem to be guaranteed + # the same machine numbering after the initial bundletester deployment, + # so we might fail when redeploying --to a specific machine to run + # these bundle tests. This is ok because all charms in this bundle are + # using 'reset: false', so we'll already have our deployment just the + # way we want it by the time this test runs. This was originally + # raised as: + # https://github.com/juju/amulet/issues/148 + for service, service_config in bundle['services'].items(): + if 'to' in service_config: + del service_config['to'] + cls.d.load(bundle) cls.d.setup(timeout=3600) # we need units reporting ready before we attempt our smoke tests diff --git a/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py index b10ed2255..51d1c3d11 100755 --- a/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-processing/tests/01-bundle.py @@ -34,6 +34,18 @@ def setUpClass(cls): bun = f.read() bundle = yaml.safe_load(bun) + # NB: strip machine ('to') placement. We don't seem to be guaranteed + # the same machine numbering after the initial bundletester deployment, + # so we might fail when redeploying --to a specific machine to run + # these bundle tests. 
This is ok because all charms in this bundle are + # using 'reset: false', so we'll already have our deployment just the + # way we want it by the time this test runs. This was originally + # raised as: + # https://github.com/juju/amulet/issues/148 + for service, service_config in bundle['services'].items(): + if 'to' in service_config: + del service_config['to'] + cls.d.load(bundle) cls.d.setup(timeout=3600) diff --git a/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py index e8a076610..1436aa5c1 100755 --- a/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py @@ -34,6 +34,18 @@ def setUpClass(cls): bun = f.read() bundle = yaml.safe_load(bun) + # NB: strip machine ('to') placement. We don't seem to be guaranteed + # the same machine numbering after the initial bundletester deployment, + # so we might fail when redeploying --to a specific machine to run + # these bundle tests. This is ok because all charms in this bundle are + # using 'reset: false', so we'll already have our deployment just the + # way we want it by the time this test runs. This was originally + # raised as: + # https://github.com/juju/amulet/issues/148 + for service, service_config in bundle['services'].items(): + if 'to' in service_config: + del service_config['to'] + cls.d.load(bundle) cls.d.setup(timeout=3600) diff --git a/bigtop-deploy/juju/spark-processing/tests/01-bundle.py b/bigtop-deploy/juju/spark-processing/tests/01-bundle.py index 778213615..d76e03853 100755 --- a/bigtop-deploy/juju/spark-processing/tests/01-bundle.py +++ b/bigtop-deploy/juju/spark-processing/tests/01-bundle.py @@ -31,8 +31,21 @@ def setUpClass(cls): bun = f.read() bundle = yaml.safe_load(bun) + # NB: strip machine ('to') placement. 
We don't seem to be guaranteed + # the same machine numbering after the initial bundletester deployment, + # so we might fail when redeploying --to a specific machine to run + # these bundle tests. This is ok because all charms in this bundle are + # using 'reset: false', so we'll already have our deployment just the + # way we want it by the time this test runs. This was originally + # raised as: + # https://github.com/juju/amulet/issues/148 + for service, service_config in bundle['services'].items(): + if 'to' in service_config: + del service_config['to'] + cls.d.load(bundle) cls.d.setup(timeout=3600) + cls.d.sentry.wait_for_messages({'spark': 'ready (standalone - HA)'}, timeout=3600) cls.spark = cls.d.sentry['spark'][0] cls.zookeeper = cls.d.sentry['zookeeper'][0] From 85ab331a528cb2b6d17a1ec84d251bfaa977a392 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 04:34:05 +0000 Subject: [PATCH 08/15] explicit namespace for kafka/pig/zk tests (plus associated charm rev bumps) --- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/bundle.yaml | 4 ++-- bigtop-deploy/juju/hadoop-spark/bundle.yaml | 2 +- bigtop-deploy/juju/spark-processing/bundle.yaml | 2 +- .../src/charm/kafka/layer-kafka/tests/01-deploy.py | 2 +- .../src/charm/kafka/layer-kafka/tests/02-smoke-test.py | 2 +- .../src/charm/kafka/layer-kafka/tests/10-config-changed.py | 2 +- bigtop-packages/src/charm/pig/layer-pig/tests/01-deploy.py | 2 +- .../charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py | 2 +- .../charm/zookeeper/layer-zookeeper/tests/10-bind-address.py | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index 01608c4ef..8294f2710 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -54,7 +54,7 @@ services: - "2" - "3" zookeeper: - charm: "cs:xenial/zookeeper-14" + charm: 
"cs:xenial/zookeeper-15" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml index c92103009..5c0723114 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml @@ -52,7 +52,7 @@ services: to: - "4" zookeeper: - charm: "cs:xenial/zookeeper-14" + charm: "cs:xenial/zookeeper-15" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: @@ -63,7 +63,7 @@ services: - "6" - "7" kafka: - charm: "cs:xenial/kafka-9" + charm: "cs:xenial/kafka-10" constraints: "mem=3G" num_units: 1 annotations: diff --git a/bigtop-deploy/juju/hadoop-spark/bundle.yaml b/bigtop-deploy/juju/hadoop-spark/bundle.yaml index 3eb1b4e87..91c15b630 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-spark/bundle.yaml @@ -54,7 +54,7 @@ services: to: - "5" zookeeper: - charm: "cs:xenial/zookeeper-14" + charm: "cs:xenial/zookeeper-15" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/spark-processing/bundle.yaml b/bigtop-deploy/juju/spark-processing/bundle.yaml index e467321a1..01eef5786 100644 --- a/bigtop-deploy/juju/spark-processing/bundle.yaml +++ b/bigtop-deploy/juju/spark-processing/bundle.yaml @@ -10,7 +10,7 @@ services: - "0" - "1" zookeeper: - charm: "cs:xenial/zookeeper-14" + charm: "cs:xenial/zookeeper-15" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py index 62d6a5594..f8f06791e 100755 --- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py +++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/01-deploy.py @@ -26,7 +26,7 @@ class TestDeploy(unittest.TestCase): @classmethod def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('kafka', charm='kafka') + 
cls.d.add('kafka', charm='cs:xenial/kafka') cls.d.add('zookeeper', charm='cs:xenial/zookeeper') cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper') diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py index f396bdb3c..c688c6992 100755 --- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py +++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/02-smoke-test.py @@ -26,7 +26,7 @@ class TestDeploy(unittest.TestCase): @classmethod def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('kafka', charm='kafka') + cls.d.add('kafka', charm='cs:xenial/kafka') cls.d.add('zookeeper', charm='cs:xenial/zookeeper') cls.d.relate('kafka:zookeeper', 'zookeeper:zookeeper') diff --git a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py index 27e875369..6c57e0dd2 100755 --- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py +++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py @@ -29,7 +29,7 @@ class TestConfigChanged(unittest.TestCase): @classmethod def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('kafka-test', charm='kafka') + cls.d.add('kafka-test', charm='cs:xenial/kafka') cls.d.add('zk-test', charm='cs:xenial/zookeeper') cls.d.relate('kafka-test:zookeeper', 'zk-test:zookeeper') diff --git a/bigtop-packages/src/charm/pig/layer-pig/tests/01-deploy.py b/bigtop-packages/src/charm/pig/layer-pig/tests/01-deploy.py index c24617db1..53364c18d 100755 --- a/bigtop-packages/src/charm/pig/layer-pig/tests/01-deploy.py +++ b/bigtop-packages/src/charm/pig/layer-pig/tests/01-deploy.py @@ -27,7 +27,7 @@ class TestDeploy(unittest.TestCase): @classmethod def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('pig', 'pig') + cls.d.add('pig', 'cs:xenial/pig') 
cls.d.setup(timeout=1800) cls.d.sentry.wait_for_messages({'pig': re.compile('ready')}, timeout=1800) diff --git a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py index feb45f2a2..f9a008a27 100755 --- a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py +++ b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/01-deploy-smoke.py @@ -29,7 +29,7 @@ class TestDeploy(unittest.TestCase): def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('zookeeper', charm='zookeeper', units=3) + cls.d.add('zookeeper', charm='cs:xenial/zookeeper', units=3) cls.d.setup(timeout=1800) cls.d.sentry.wait_for_messages({'zookeeper': re.compile('ready')}, diff --git a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py index 9e6068651..70bb0a5fc 100755 --- a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py +++ b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py @@ -32,7 +32,7 @@ class TestBindClientPort(unittest.TestCase): def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') - cls.d.add('zk-test', charm='zookeeper') + cls.d.add('zk-test', charm='cs:xenial/zookeeper') cls.d.setup(timeout=TIMEOUT) cls.d.sentry.wait_for_messages({'zk-test': re.compile('ready')}, From a6f74a3d54d95b32795d0c3daac7468275b17684 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 18:45:21 +0000 Subject: [PATCH 09/15] handle tearDownClass in the tests better --- .../layer-kafka/tests/10-config-changed.py | 10 +++++++++- .../layer-spark/tests/03-scale-standalone.py | 11 +++++----- .../spark/layer-spark/tests/10-test-ha.py | 13 ++++++------ .../layer-zookeeper/tests/10-bind-address.py | 20 +++++++++++++++---- 4 files changed, 36 insertions(+), 18 deletions(-) diff --git 
a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py index 6c57e0dd2..a27f7833d 100755 --- a/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py +++ b/bigtop-packages/src/charm/kafka/layer-kafka/tests/10-config-changed.py @@ -40,7 +40,15 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - cls.d.remove_service('zk-test', 'kafka-test') + # NB: seems to be a remove_service issue with amulet. However, the + # unit does still get removed. Pass OSError for now: + # OSError: juju command failed ['remove-application', 'zk-test']: + # ERROR allocation for service ...zk-test... owned by ... not found + try: + cls.d.remove_service('zk-test', 'kafka-test') + except OSError as e: + print("IGNORE: Amulet remove_service failed: {}".format(e)) + pass def test_bind_network_interface(self): """ diff --git a/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py b/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py index 4392fac8e..33479f3d6 100755 --- a/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py +++ b/bigtop-packages/src/charm/spark/layer-spark/tests/03-scale-standalone.py @@ -33,15 +33,14 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + # NB: seems to be a remove_service issue with amulet. However, the + # unit does still get removed. Pass OSError for now: + # OSError: juju command failed ['remove-application', 'zk-test']: + # ERROR allocation for service ...zk-test... owned by ... not found try: cls.d.remove_service('spark-test-scale') except OSError as e: - # NB: it looks like remove_service complains if it cannot tear down - # all the units that we spun up in setupClass. Since we manually - # kill units as part of the tests below, allow remove-application - # to fail with an OSError. Pay attention here (kwmonroe) in - # case this needs to be reported as an amulet issue. 
- print("Amulet remove-service returned: {}".format(e.errno)) + print("IGNORE: Amulet remove_service failed: {}".format(e)) pass def test_scaleup(self): diff --git a/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py b/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py index b1aa1e6dd..4a1ba838c 100755 --- a/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py +++ b/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py @@ -37,15 +37,14 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): + # NB: seems to be a remove_service issue with amulet. However, the + # unit does still get removed. Pass OSError for now: + # OSError: juju command failed ['remove-application', 'zk-test']: + # ERROR allocation for service ...zk-test... owned by ... not found try: - cls.d.remove_service('zk-test', 'spark-test-ha') + cls.d.remove_service('spark-test-ha', 'zk-test') except OSError as e: - # NB: it looks like remove_service complains if it cannot tear down - # all the units that we spun up in setupClass. Since we manually - # kill units as part of the tests below, allow remove-application - # to fail with an OSError. Pay attention here (kwmonroe) in - # case this needs to be reported as an amulet issue. 
- print("Amulet remove-service returned: {}".format(e.errno)) + print("IGNORE: Amulet remove_service failed: {}".format(e)) pass def test_master_selected(self): diff --git a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py index 70bb0a5fc..17ef3a2a3 100755 --- a/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py +++ b/bigtop-packages/src/charm/zookeeper/layer-zookeeper/tests/10-bind-address.py @@ -35,13 +35,21 @@ def setUpClass(cls): cls.d.add('zk-test', charm='cs:xenial/zookeeper') cls.d.setup(timeout=TIMEOUT) - cls.d.sentry.wait_for_messages({'zk-test': re.compile('ready')}, + cls.d.sentry.wait_for_messages({'zk-test': re.compile('^ready')}, timeout=TIMEOUT) cls.unit = cls.d.sentry['zk-test'][0] @classmethod def tearDownClass(cls): - cls.d.remove_service('zk-test') + # NB: seems to be a remove_service issue with amulet. However, the + # unit does still get removed. Pass OSError for now: + # OSError: juju command failed ['remove-application', 'zk-test']: + # ERROR allocation for service ...zk-test... owned by ... 
not found + try: + cls.d.remove_service('zk-test') + except OSError as e: + print("IGNORE: Amulet remove_service failed: {}".format(e)) + pass def test_bind_port(self): """ @@ -76,13 +84,15 @@ def test_bind_port(self): "^clientPortAddress=\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}.*") self.assertTrue(matcher.match(ret)) - # Verify that smoke tests still run + # Verify that smoke tests still run and the unit returns to 'ready' smk_uuid = self.unit.run_action('smoke-test') # 'zookeeper' smoke takes a while (bigtop tests are slow) result = self.d.action_fetch(smk_uuid, timeout=1800, full_output=True) # actions set status=completed on success if (result['status'] != "completed"): self.fail('Zookeeper smoke-test failed: %s' % result) + self.d.sentry.wait_for_messages({'zk-test': re.compile('^ready')}, + timeout=TIMEOUT) def test_reset_bindings(self): """ @@ -99,13 +109,15 @@ def test_reset_bindings(self): matcher = re.compile("^clientPortAddress=0\.0\.0\.0.*") self.assertTrue(matcher.match(ret)) - # Verify that smoke tests still run + # Verify that smoke tests still run and the unit returns to 'ready' smk_uuid = self.unit.run_action('smoke-test') # 'zookeeper' smoke takes a while (bigtop tests are slow) result = self.d.action_fetch(smk_uuid, timeout=1800, full_output=True) # actions set status=completed on success if (result['status'] != "completed"): self.fail('Zookeeper smoke-test failed: %s' % result) + self.d.sentry.wait_for_messages({'zk-test': re.compile('^ready')}, + timeout=TIMEOUT) if __name__ == '__main__': From 23ea636c1aea053af6ddae6513d01e480548af93 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 18:46:30 +0000 Subject: [PATCH 10/15] bump revs for spark/kafka/zk test fixes; only release bundles (not charms) on CI success --- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/bundle.yaml | 4 ++-- bigtop-deploy/juju/hadoop-kafka/ci-info.yaml | 16 ++++++++-------- .../juju/hadoop-processing/ci-info.yaml | 12 
++++++------ bigtop-deploy/juju/hadoop-spark/bundle.yaml | 4 ++-- bigtop-deploy/juju/hadoop-spark/ci-info.yaml | 16 ++++++++-------- bigtop-deploy/juju/spark-processing/bundle.yaml | 4 ++-- bigtop-deploy/juju/spark-processing/ci-info.yaml | 6 +++--- 8 files changed, 32 insertions(+), 32 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index 8294f2710..de24f626a 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -54,7 +54,7 @@ services: - "2" - "3" zookeeper: - charm: "cs:xenial/zookeeper-15" + charm: "cs:xenial/zookeeper-16" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml index 5c0723114..31e596187 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml @@ -52,7 +52,7 @@ services: to: - "4" zookeeper: - charm: "cs:xenial/zookeeper-15" + charm: "cs:xenial/zookeeper-16" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: @@ -63,7 +63,7 @@ services: - "6" - "7" kafka: - charm: "cs:xenial/kafka-10" + charm: "cs:xenial/kafka-11" constraints: "mem=3G" num_units: 1 annotations: diff --git a/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml b/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml index 56f11bbf6..fa4df9b70 100644 --- a/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/ci-info.yaml @@ -2,33 +2,33 @@ bundle: name: hadoop-kafka namespace: bigdata-charmers release: true - to-channel: beta + to-channel: edge charm-upgrade: hadoop-namenode: from-channel: edge - release: true + release: false to-channel: beta hadoop-resourcemanager: from-channel: edge - release: true + release: false to-channel: beta hadoop-slave: from-channel: edge - release: true + release: false to-channel: beta hadoop-client: from-channel: edge - release: true + release: false 
to-channel: beta hadoop-plugin: from-channel: edge - release: true + release: false to-channel: beta kafka: from-channel: edge - release: true + release: false to-channel: beta zookeeper: from-channel: edge - release: true + release: false to-channel: beta diff --git a/bigtop-deploy/juju/hadoop-processing/ci-info.yaml b/bigtop-deploy/juju/hadoop-processing/ci-info.yaml index 72e208253..38ec28b2b 100644 --- a/bigtop-deploy/juju/hadoop-processing/ci-info.yaml +++ b/bigtop-deploy/juju/hadoop-processing/ci-info.yaml @@ -2,25 +2,25 @@ bundle: name: hadoop-processing namespace: bigdata-charmers release: true - to-channel: beta + to-channel: edge charm-upgrade: hadoop-namenode: from-channel: edge - release: true + release: false to-channel: beta hadoop-resourcemanager: from-channel: edge - release: true + release: false to-channel: beta hadoop-slave: from-channel: edge - release: true + release: false to-channel: beta hadoop-client: from-channel: edge - release: true + release: false to-channel: beta hadoop-plugin: from-channel: edge - release: true + release: false to-channel: beta diff --git a/bigtop-deploy/juju/hadoop-spark/bundle.yaml b/bigtop-deploy/juju/hadoop-spark/bundle.yaml index 91c15b630..5d5716c0e 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-spark/bundle.yaml @@ -43,7 +43,7 @@ services: to: - "4" spark: - charm: "cs:xenial/spark-23" + charm: "cs:xenial/spark-24" constraints: "mem=7G root-disk=32G" num_units: 1 options: @@ -54,7 +54,7 @@ services: to: - "5" zookeeper: - charm: "cs:xenial/zookeeper-15" + charm: "cs:xenial/zookeeper-16" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/hadoop-spark/ci-info.yaml b/bigtop-deploy/juju/hadoop-spark/ci-info.yaml index ae79aee78..17aff8bc7 100644 --- a/bigtop-deploy/juju/hadoop-spark/ci-info.yaml +++ b/bigtop-deploy/juju/hadoop-spark/ci-info.yaml @@ -2,33 +2,33 @@ bundle: name: hadoop-spark namespace: bigdata-charmers release: true 
- to-channel: beta + to-channel: edge charm-upgrade: hadoop-namenode: from-channel: edge - release: true + release: false to-channel: beta hadoop-resourcemanager: from-channel: edge - release: true + release: false to-channel: beta hadoop-slave: from-channel: edge - release: true + release: false to-channel: beta hadoop-client: from-channel: edge - release: true + release: false to-channel: beta hadoop-plugin: from-channel: edge - release: true + release: false to-channel: beta spark: from-channel: edge - release: true + release: false to-channel: beta zookeeper: from-channel: edge - release: true + release: false to-channel: beta diff --git a/bigtop-deploy/juju/spark-processing/bundle.yaml b/bigtop-deploy/juju/spark-processing/bundle.yaml index 01eef5786..f3f81e1ae 100644 --- a/bigtop-deploy/juju/spark-processing/bundle.yaml +++ b/bigtop-deploy/juju/spark-processing/bundle.yaml @@ -1,6 +1,6 @@ services: spark: - charm: "cs:xenial/spark-23" + charm: "cs:xenial/spark-24" constraints: "mem=7G root-disk=32G" num_units: 2 annotations: @@ -10,7 +10,7 @@ services: - "0" - "1" zookeeper: - charm: "cs:xenial/zookeeper-15" + charm: "cs:xenial/zookeeper-16" constraints: "mem=3G root-disk=32G" num_units: 3 annotations: diff --git a/bigtop-deploy/juju/spark-processing/ci-info.yaml b/bigtop-deploy/juju/spark-processing/ci-info.yaml index 4402a9a5a..ab67fd546 100644 --- a/bigtop-deploy/juju/spark-processing/ci-info.yaml +++ b/bigtop-deploy/juju/spark-processing/ci-info.yaml @@ -2,13 +2,13 @@ bundle: name: spark-processing namespace: bigdata-charmers release: true - to-channel: beta + to-channel: edge charm-upgrade: spark: from-channel: edge - release: true + release: false to-channel: beta zookeeper: from-channel: edge - release: true + release: false to-channel: beta From 9657bc7e90b21ad6d8f1be74d0ab8514055ee62f Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 18:54:55 +0000 Subject: [PATCH 11/15] add ci-info for hbase bundle --- 
bigtop-deploy/juju/hadoop-hbase/ci-info.yaml | 34 ++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 bigtop-deploy/juju/hadoop-hbase/ci-info.yaml diff --git a/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml b/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml new file mode 100644 index 000000000..aa6f23010 --- /dev/null +++ b/bigtop-deploy/juju/hadoop-hbase/ci-info.yaml @@ -0,0 +1,34 @@ +bundle: + name: hadoop-hbase + namespace: bigdata-charmers + release: true + to-channel: edge +charm-upgrade: + hadoop-namenode: + from-channel: edge + release: false + to-channel: beta + hadoop-resourcemanager: + from-channel: edge + release: false + to-channel: beta + hadoop-slave: + from-channel: edge + release: false + to-channel: beta + hadoop-client: + from-channel: edge + release: false + to-channel: beta + hadoop-plugin: + from-channel: edge + release: false + to-channel: beta + hbase: + from-channel: edge + release: false + to-channel: beta + zookeeper: + from-channel: edge + release: false + to-channel: beta From 53b8330890ed64669bf9017ab692d36d2c2ff77f Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 19:22:36 +0000 Subject: [PATCH 12/15] use strings vs integers for large ints to prevent scientific notation --- .../hadoop/layer-hadoop-resourcemanager/actions.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml b/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml index bdd28beaa..ebeaa4bef 100644 --- a/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml +++ b/bigtop-packages/src/charm/hadoop/layer-hadoop-resourcemanager/actions.yaml @@ -77,15 +77,15 @@ testdfsio: default: 1000 buffersize: description: Buffer size in bytes - type: integer - default: 1000000 + type: string + default: "1000000" teragen: description: Generate data with teragen params: size: description: The number of 100 byte 
rows, default to 1GB of data to generate - type: integer - default: 10000000 + type: string + default: "10000000" indir: description: HDFS directory where generated data is stored type: string @@ -103,8 +103,8 @@ terasort: default: '/benchmarks/TeraSort' size: description: The number of 100 byte rows, default to 1GB of data to generate and sort - type: integer - default: 10000000 + type: string + default: "10000000" maps: description: The default number of map tasks per job. 1-20 type: integer From db27e4603b0490470005344450b9106a14eae27b Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Sat, 25 Mar 2017 19:25:40 +0000 Subject: [PATCH 13/15] push RM fix for actions with integers --- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-processing/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-spark/bundle.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index de24f626a..9d24726bd 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-12" + charm: "cs:xenial/hadoop-resourcemanager-13" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml index 31e596187..0bd4c23b9 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-12" + charm: "cs:xenial/hadoop-resourcemanager-13" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: diff --git a/bigtop-deploy/juju/hadoop-processing/bundle.yaml b/bigtop-deploy/juju/hadoop-processing/bundle.yaml index 90df10c6d..7eee0047b 100644 --- 
a/bigtop-deploy/juju/hadoop-processing/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-processing/bundle.yaml @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-12" + charm: "cs:xenial/hadoop-resourcemanager-13" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: diff --git a/bigtop-deploy/juju/hadoop-spark/bundle.yaml b/bigtop-deploy/juju/hadoop-spark/bundle.yaml index 5d5716c0e..17a543831 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-spark/bundle.yaml @@ -9,7 +9,7 @@ services: to: - "0" resourcemanager: - charm: "cs:xenial/hadoop-resourcemanager-12" + charm: "cs:xenial/hadoop-resourcemanager-13" constraints: "mem=7G root-disk=32G" num_units: 1 annotations: From 16bff6d162e23d3c7299c9ddd74fd00354622b08 Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Mon, 27 Mar 2017 16:43:01 +0000 Subject: [PATCH 14/15] do not pre-deploy the bundles (1st bundle test will do this for us) --- bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py | 1 + bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py | 1 + bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml | 2 +- bigtop-deploy/juju/hadoop-processing/tests/tests.yaml | 2 +- bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py | 1 + bigtop-deploy/juju/hadoop-spark/tests/tests.yaml | 2 +- bigtop-deploy/juju/spark-processing/tests/01-bundle.py | 6 +++++- bigtop-deploy/juju/spark-processing/tests/tests.yaml | 2 +- .../src/charm/spark/layer-spark/tests/10-test-ha.py | 2 ++ 10 files changed, 15 insertions(+), 6 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py index 854fce7bf..166ac5465 100755 --- a/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-hbase/tests/01-bundle.py @@ -55,6 +55,7 @@ def setUpClass(cls): 'resourcemanager': re.compile('ready'), 'slave': re.compile('ready'), 
'hbase': re.compile('ready'), + 'zookeeper': re.compile('ready'), }, timeout=3600) cls.hdfs = cls.d.sentry['namenode'][0] cls.yarn = cls.d.sentry['resourcemanager'][0] diff --git a/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml index 28516a6ff..a3b7803f6 100644 --- a/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/tests/tests.yaml @@ -1,5 +1,5 @@ reset: false -deployment_timeout: 7200 +bundle_deploy: false sources: - 'ppa:juju/stable' packages: diff --git a/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py index 34f974363..fb113fc60 100755 --- a/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-kafka/tests/01-bundle.py @@ -53,6 +53,7 @@ def setUpClass(cls): 'namenode': re.compile('ready'), 'resourcemanager': re.compile('ready'), 'slave': re.compile('ready'), + 'zookeeper': re.compile('ready'), }, timeout=3600) cls.hdfs = cls.d.sentry['namenode'][0] cls.yarn = cls.d.sentry['resourcemanager'][0] diff --git a/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml b/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml index b95174212..a3b7803f6 100644 --- a/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/tests/tests.yaml @@ -1,5 +1,5 @@ reset: false -deployment_timeout: 3600 +bundle_deploy: false sources: - 'ppa:juju/stable' packages: diff --git a/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml b/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml index b95174212..a3b7803f6 100644 --- a/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-processing/tests/tests.yaml @@ -1,5 +1,5 @@ reset: false -deployment_timeout: 3600 +bundle_deploy: false sources: - 'ppa:juju/stable' packages: diff --git a/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py b/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py index 1436aa5c1..1dc414754 
100755 --- a/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py +++ b/bigtop-deploy/juju/hadoop-spark/tests/01-bundle.py @@ -55,6 +55,7 @@ def setUpClass(cls): 'resourcemanager': re.compile('ready'), 'slave': re.compile('ready'), 'spark': re.compile('ready'), + 'zookeeper': re.compile('ready'), }, timeout=3600) cls.hdfs = cls.d.sentry['namenode'][0] cls.yarn = cls.d.sentry['resourcemanager'][0] diff --git a/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml b/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml index b95174212..a3b7803f6 100644 --- a/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml +++ b/bigtop-deploy/juju/hadoop-spark/tests/tests.yaml @@ -1,5 +1,5 @@ reset: false -deployment_timeout: 3600 +bundle_deploy: false sources: - 'ppa:juju/stable' packages: diff --git a/bigtop-deploy/juju/spark-processing/tests/01-bundle.py b/bigtop-deploy/juju/spark-processing/tests/01-bundle.py index d76e03853..6275d4374 100755 --- a/bigtop-deploy/juju/spark-processing/tests/01-bundle.py +++ b/bigtop-deploy/juju/spark-processing/tests/01-bundle.py @@ -17,6 +17,7 @@ import amulet import os +import re import unittest import yaml @@ -46,7 +47,10 @@ def setUpClass(cls): cls.d.load(bundle) cls.d.setup(timeout=3600) - cls.d.sentry.wait_for_messages({'spark': 'ready (standalone - HA)'}, timeout=3600) + # we need units reporting ready before we attempt our smoke tests + cls.d.sentry.wait_for_messages({'spark': 'ready (standalone - HA)', + 'zookeeper': re.compile('ready'), + }, timeout=3600) cls.spark = cls.d.sentry['spark'][0] cls.zookeeper = cls.d.sentry['zookeeper'][0] diff --git a/bigtop-deploy/juju/spark-processing/tests/tests.yaml b/bigtop-deploy/juju/spark-processing/tests/tests.yaml index e4b472ea4..8a28f2bd8 100644 --- a/bigtop-deploy/juju/spark-processing/tests/tests.yaml +++ b/bigtop-deploy/juju/spark-processing/tests/tests.yaml @@ -1,5 +1,5 @@ reset: false -deployment_timeout: 3600 +bundle_deploy: false sources: - 'ppa:juju/stable' packages: diff --git 
a/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py b/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py index 4a1ba838c..99c604f9f 100755 --- a/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py +++ b/bigtop-packages/src/charm/spark/layer-spark/tests/10-test-ha.py @@ -30,8 +30,10 @@ def setUpClass(cls): cls.d = amulet.Deployment(series='xenial') cls.d.add('spark-test-ha', 'cs:xenial/spark', units=3) cls.d.add('zk-test', 'cs:xenial/zookeeper') + cls.d.relate('zk-test:zookeeper', 'spark-test-ha:zookeeper') cls.d.expose('spark-test-ha') + cls.d.setup(timeout=3600) cls.d.sentry.wait(timeout=3600) From 7f2607c316d12c1770444bf767dd3168f0b68a5c Mon Sep 17 00:00:00 2001 From: Kevin W Monroe Date: Mon, 27 Mar 2017 19:59:20 +0000 Subject: [PATCH 15/15] fix ganglia-node error discovered by matrix chaos --- bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml | 2 +- bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml | 2 +- bigtop-deploy/juju/hadoop-hbase/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml | 2 +- bigtop-deploy/juju/hadoop-kafka/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml | 2 +- bigtop-deploy/juju/hadoop-processing/bundle-local.yaml | 2 +- bigtop-deploy/juju/hadoop-processing/bundle.yaml | 2 +- bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml | 2 +- bigtop-deploy/juju/hadoop-spark/bundle-local.yaml | 2 +- bigtop-deploy/juju/hadoop-spark/bundle.yaml | 2 +- bigtop-deploy/juju/spark-processing/bundle-dev.yaml | 2 +- bigtop-deploy/juju/spark-processing/bundle-local.yaml | 2 +- bigtop-deploy/juju/spark-processing/bundle.yaml | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml index 2298cbb07..e4737e16d 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle-dev.yaml @@ 
-73,7 +73,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml index 473b5890b..7ae57f66a 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle-local.yaml @@ -73,7 +73,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml index 9d24726bd..95b63d708 100644 --- a/bigtop-deploy/juju/hadoop-hbase/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-hbase/bundle.yaml @@ -73,7 +73,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml index 36053a816..45b821fa9 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle-dev.yaml @@ -89,7 +89,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml index 500503c8a..bd8987284 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle-local.yaml @@ -89,7 +89,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git 
a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml index 0bd4c23b9..80aa8951d 100644 --- a/bigtop-deploy/juju/hadoop-kafka/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-kafka/bundle.yaml @@ -89,7 +89,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml index 00fbdffed..20ae8afc2 100644 --- a/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml +++ b/bigtop-deploy/juju/hadoop-processing/bundle-dev.yaml @@ -51,7 +51,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml b/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml index 39e7a2a6b..b277df4d0 100644 --- a/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml +++ b/bigtop-deploy/juju/hadoop-processing/bundle-local.yaml @@ -51,7 +51,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-processing/bundle.yaml b/bigtop-deploy/juju/hadoop-processing/bundle.yaml index 7eee0047b..fcd1017ed 100644 --- a/bigtop-deploy/juju/hadoop-processing/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-processing/bundle.yaml @@ -51,7 +51,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml b/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml index 0bd529c0c..a42242e50 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml +++ 
b/bigtop-deploy/juju/hadoop-spark/bundle-dev.yaml @@ -73,7 +73,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml b/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml index 0c172effa..bffc4592d 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml +++ b/bigtop-deploy/juju/hadoop-spark/bundle-local.yaml @@ -73,7 +73,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/hadoop-spark/bundle.yaml b/bigtop-deploy/juju/hadoop-spark/bundle.yaml index 17a543831..cfbdd8b6a 100644 --- a/bigtop-deploy/juju/hadoop-spark/bundle.yaml +++ b/bigtop-deploy/juju/hadoop-spark/bundle.yaml @@ -73,7 +73,7 @@ services: to: - "4" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/spark-processing/bundle-dev.yaml b/bigtop-deploy/juju/spark-processing/bundle-dev.yaml index df8306f41..af7621405 100644 --- a/bigtop-deploy/juju/spark-processing/bundle-dev.yaml +++ b/bigtop-deploy/juju/spark-processing/bundle-dev.yaml @@ -29,7 +29,7 @@ services: to: - "5" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/spark-processing/bundle-local.yaml b/bigtop-deploy/juju/spark-processing/bundle-local.yaml index 063d5e7ae..63cdc6f50 100644 --- a/bigtop-deploy/juju/spark-processing/bundle-local.yaml +++ b/bigtop-deploy/juju/spark-processing/bundle-local.yaml @@ -29,7 +29,7 @@ services: to: - "5" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" 
annotations: gui-x: "250" gui-y: "400" diff --git a/bigtop-deploy/juju/spark-processing/bundle.yaml b/bigtop-deploy/juju/spark-processing/bundle.yaml index f3f81e1ae..70ef4cf19 100644 --- a/bigtop-deploy/juju/spark-processing/bundle.yaml +++ b/bigtop-deploy/juju/spark-processing/bundle.yaml @@ -29,7 +29,7 @@ services: to: - "5" ganglia-node: - charm: "cs:~bigdata-dev/xenial/ganglia-node-6" + charm: "cs:~bigdata-dev/xenial/ganglia-node-7" annotations: gui-x: "250" gui-y: "400"