Update Bigtable Programmatic Scaling Example (#1003)
* Update Bigtable Programmatic Scaling Example

* Rename "autoscaling" to "metricscaler" and use the term "programmatic
  scaling"
* Remove `strategies.py` to simplify the example
* Fix wrong sleep length bug
* Add maximum node count

* hegemonic review
waprin committed Jun 27, 2017
1 parent 57fbe30 commit bc0924a
Showing 7 changed files with 47 additions and 112 deletions.
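Taken together, the diffs below replace the old strategy-table autoscaler with a single polling loop that nudges the cluster size by a fixed step. As a rough sketch of the resulting behavior (simplified; the threshold and sleep defaults shown here are illustrative placeholders, not values from this commit):

```python
import time

# get_cpu_load and scale_bigtable are the functions defined in
# metricscaler.py, shown in the diffs below.
from metricscaler import get_cpu_load, scale_bigtable


def run_metricscaler(instance, cluster, high_cpu=0.6, low_cpu=0.2,
                     short_sleep=60, long_sleep=600):
    """Poll CPU load and scale the cluster up or down by a fixed step."""
    while True:
        cpu = get_cpu_load()
        if cpu > high_cpu:
            scale_bigtable(instance, cluster, True)
            time.sleep(long_sleep)   # give the cluster time to rebalance
        elif cpu < low_cpu:
            scale_bigtable(instance, cluster, False)
            time.sleep(long_sleep)   # the sleep-length bug fixed below
        else:
            print('CPU within threshold, sleeping.')
            time.sleep(short_sleep)
```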
51 changes: 0 additions & 51 deletions bigtable/autoscaler/strategies.py

This file was deleted.

30 changes: 0 additions & 30 deletions bigtable/autoscaler/strategies_test.py

This file was deleted.

@@ -74,7 +74,7 @@ Install Dependencies
 Samples
 -------------------------------------------------------------------------------

-Autoscaling example
+Metricscaling example
 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

@@ -83,12 +83,12 @@ To run this sample:

 .. code-block:: bash

-    $ python autoscaler.py
+    $ python metricscaler.py

-    usage: autoscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
-                         [--low_cpu_threshold LOW_CPU_THRESHOLD]
-                         [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
-                         bigtable_instance bigtable_cluster
+    usage: metricscaler.py [-h] [--high_cpu_threshold HIGH_CPU_THRESHOLD]
+                           [--low_cpu_threshold LOW_CPU_THRESHOLD]
+                           [--short_sleep SHORT_SLEEP] [--long_sleep LONG_SLEEP]
+                           bigtable_instance bigtable_cluster

 Scales Cloud Bigtable clusters based on CPU usage.
@@ -20,8 +20,8 @@ setup:
 - install_deps

 samples:
-- name: Autoscaling example
-  file: autoscaler.py
+- name: Metricscaling example
+  file: metricscaler.py
   show_help: true

 cloud_client_library: true
@@ -21,7 +21,6 @@
 from google.cloud import bigtable
 from google.cloud import monitoring

-import strategies


 def get_cpu_load():
@@ -52,6 +51,23 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
         bigtable_cluster (str): Cloud Bigtable cluster ID to scale
         scale_up (bool): If true, scale up, otherwise scale down
     """
+    _MIN_NODE_COUNT = 3
+    """
+    The minimum number of nodes to use. The default minimum is 3. If you have
+    a lot of data, the rule of thumb is to not go below 2.5 TB per node for
+    SSD clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
+    metric is useful in figuring out the minimum number of nodes.
+    """
+
+    _MAX_NODE_COUNT = 30
+    """
+    The maximum number of nodes to use. The default maximum is 30 nodes per
+    zone. If you need more quota, you can request more by following the
+    instructions at https://cloud.google.com/bigtable/quota.
+    """
+
+    _SIZE_CHANGE_STEP = 3
+    """The number of nodes to change the cluster by."""
     # [START bigtable_scale]
     bigtable_client = bigtable.Client(admin=True)
     instance = bigtable_client.instance(bigtable_instance)
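The `_MIN_NODE_COUNT` docstring above encodes a storage rule of thumb: stay under 2.5 TB per node for SSD clusters and 8 TB per node for HDD. A minimal sketch of how that rule could become a storage-aware floor; the helper name and the idea of feeding it a `bigtable.googleapis.com/disk/bytes_used` reading are illustrative assumptions, not part of this commit:

```python
import math

TB = 2 ** 40  # bytes per tebibyte


def storage_min_nodes(bytes_used, storage_type='SSD'):
    # Hypothetical helper: the lowest node count that respects the
    # per-node storage rule of thumb from the docstring above.
    per_node_limit = 2.5 * TB if storage_type == 'SSD' else 8 * TB
    return max(3, int(math.ceil(bytes_used / per_node_limit)))


# e.g. 20 TB on SSD needs ceil(20 / 2.5) = 8 nodes, and never fewer than 3
assert storage_min_nodes(20 * TB) == 8
```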
@@ -62,21 +78,21 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):

     current_node_count = cluster.serve_nodes

-    if current_node_count <= 3 and not scale_up:
-        # Can't downscale lower than 3 nodes
-        return
-
     if scale_up:
-        strategies_dict = strategies.UPSCALE_STRATEGIES
+        if current_node_count < _MAX_NODE_COUNT:
+            new_node_count = min(
+                current_node_count + _SIZE_CHANGE_STEP, _MAX_NODE_COUNT)
+            cluster.serve_nodes = new_node_count
+            cluster.update()
+            print('Scaled up from {} to {} nodes.'.format(
+                current_node_count, new_node_count))
     else:
-        strategies_dict = strategies.DOWNSCALE_STRATEGIES
-
-    strategy = strategies_dict['incremental']
-    new_node_count = strategy(cluster.serve_nodes)
-    cluster.serve_nodes = new_node_count
-    cluster.update()
-    print('Scaled from {} up to {} nodes.'.format(
-        current_node_count, new_node_count))
+        if current_node_count > _MIN_NODE_COUNT:
+            new_node_count = max(
+                current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
+            cluster.serve_nodes = new_node_count
+            cluster.update()
+            print('Scaled down from {} to {} nodes.'.format(
+                current_node_count, new_node_count))
     # [END bigtable_scale]
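The min/max clamps above keep the node count inside [_MIN_NODE_COUNT, _MAX_NODE_COUNT] even when a full step would overshoot: a 29-node cluster scales up to 30, not 32. A small, self-contained restatement of just the sizing rule (a sketch for illustration, not code from the commit):

```python
_MIN_NODE_COUNT, _MAX_NODE_COUNT, _SIZE_CHANGE_STEP = 3, 30, 3


def next_node_count(current, scale_up):
    """Pure version of the sizing rule used by scale_bigtable above."""
    if scale_up:
        return min(current + _SIZE_CHANGE_STEP, _MAX_NODE_COUNT)
    return max(current - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)


assert next_node_count(29, True) == 30   # clamped at the quota ceiling
assert next_node_count(4, False) == 3    # never below the 3-node minimum
```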


@@ -104,7 +120,7 @@ def main(
         time.sleep(long_sleep)
     elif cluster_cpu < low_cpu_threshold:
         scale_bigtable(bigtable_instance, bigtable_cluster, False)
-        time.sleep(short_sleep)
+        time.sleep(long_sleep)
     else:
         print('CPU within threshold, sleeping.')
         time.sleep(short_sleep)
@@ -12,17 +12,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Unit and system tests for autoscaler.py"""
+"""Unit and system tests for metricscaler.py"""

 import os
 import time

 from google.cloud import bigtable
 from mock import patch

-from autoscaler import get_cpu_load
-from autoscaler import main
-from autoscaler import scale_bigtable
+from metricscaler import _SIZE_CHANGE_STEP
+from metricscaler import get_cpu_load
+from metricscaler import main
+from metricscaler import scale_bigtable

 # tests assume instance and cluster have the same ID
 BIGTABLE_INSTANCE = os.environ['BIGTABLE_CLUSTER']
@@ -49,7 +50,7 @@ def test_scale_bigtable():
     cluster.reload()

     new_node_count = cluster.serve_nodes
-    assert (new_node_count == (original_node_count + 2))
+    assert (new_node_count == (original_node_count + _SIZE_CHANGE_STEP))

     scale_bigtable(BIGTABLE_INSTANCE, BIGTABLE_INSTANCE, False)
     time.sleep(3)
@@ -59,10 +60,9 @@


 # Unit test for logic
-
 @patch('time.sleep')
-@patch('autoscaler.get_cpu_load')
-@patch('autoscaler.scale_bigtable')
+@patch('metricscaler.get_cpu_load')
+@patch('metricscaler.scale_bigtable')
 def test_main(scale_bigtable, get_cpu_load, sleep):
     SHORT_SLEEP = 5
     LONG_SLEEP = 10
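As context for the fragment above: stacked `mock.patch` decorators apply bottom-up, so the mock for `scale_bigtable` arrives as the first test argument. A minimal sketch of how such a test can drive `main` without touching a real cluster (the threshold values and the assertion here are illustrative assumptions, not the commit's actual test body):

```python
from mock import patch

from metricscaler import main


@patch('time.sleep')
@patch('metricscaler.get_cpu_load')
@patch('metricscaler.scale_bigtable')
def test_main_scales_up_on_high_cpu(scale_bigtable, get_cpu_load, sleep):
    # Pretend the cluster is running hot; main() should scale up once,
    # matching the call shape seen in the main() diff above.
    get_cpu_load.return_value = 0.7
    main('my-instance', 'my-cluster',
         high_cpu_threshold=0.6, low_cpu_threshold=0.2,
         short_sleep=5, long_sleep=10)
    scale_bigtable.assert_called_once_with('my-instance', 'my-cluster', True)
```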
File renamed without changes.
