From 32260776443fd5d6cbb8dd2af3fd4c2fea2c1a6d Mon Sep 17 00:00:00 2001
From: Clint Byrum
Date: Tue, 24 Sep 2013 15:25:32 -0700
Subject: [PATCH] Provide config option to cap events per stack

Previously users could write to the events table infinitely. With this
change Heat will automatically prune some events if the maximum is
reached. The behavior can be disabled by setting max_events_per_stack
to zero.

Change-Id: I4fb2fefbd9bcd10ce1767ddf58a870206a6482a1
Fixes-Bug: #1209492
---
 etc/heat/heat.conf.sample | 11 +++++++++++
 heat/common/config.py     | 15 ++++++++++++---
 heat/db/sqlalchemy/api.py | 31 +++++++++++++++++++++++++++++++
 heat/tests/test_event.py  | 23 +++++++++++++++++++++++
 4 files changed, 77 insertions(+), 3 deletions(-)

diff --git a/etc/heat/heat.conf.sample b/etc/heat/heat.conf.sample
index cffb2164268..1444f9ba2fc 100644
--- a/etc/heat/heat.conf.sample
+++ b/etc/heat/heat.conf.sample
@@ -39,6 +39,17 @@
 # one time. (integer value)
 #max_stacks_per_tenant=100
 
+# Controls how many events will be pruned whenever a stack's
+# events exceed max_events_per_stack. Set this lower to keep
+# more events at the expense of more frequent purges. (integer
+# value)
+#event_purge_batch_size=10
+
+# Maximum events that will be available per stack. Older
+# events will be deleted when this is reached. Set to 0 for
+# unlimited events per stack. (integer value)
+#max_events_per_stack=1000
+
 # Name of the engine node. This can be an opaque identifier.It
 # is not necessarily a hostname, FQDN, or IP address. (string
 # value)
diff --git a/heat/common/config.py b/heat/common/config.py
index 7fb50440b24..82b4ca5db8b 100644
--- a/heat/common/config.py
+++ b/heat/common/config.py
@@ -104,9 +104,18 @@
     cfg.IntOpt('max_stacks_per_tenant',
                default=100,
                help=_('Maximum number of stacks any one tenant may have'
-                      ' active at one time.'))]
-
-
+                      ' active at one time.')),
+    cfg.IntOpt('event_purge_batch_size',
+               default=10,
+               help=_('Controls how many events will be pruned whenever a '
+                      ' stack\'s events exceed max_events_per_stack. Set this'
+                      ' lower to keep more events at the expense of more'
+                      ' frequent purges.')),
+    cfg.IntOpt('max_events_per_stack',
+               default=1000,
+               help=_('Maximum events that will be available per stack. Older'
+                      ' events will be deleted when this is reached. Set to 0'
+                      ' for unlimited events per stack.'))]
 rpc_opts = [
     cfg.StrOpt('host',
                default=socket.gethostname(),
diff --git a/heat/db/sqlalchemy/api.py b/heat/db/sqlalchemy/api.py
index 65fe5ca2c8a..1cfa07c3cb7 100644
--- a/heat/db/sqlalchemy/api.py
+++ b/heat/db/sqlalchemy/api.py
@@ -17,9 +17,12 @@
 from datetime import datetime
 from datetime import timedelta
 
+from oslo.config import cfg
 import sqlalchemy
 from sqlalchemy.orm.session import Session
 
+cfg.CONF.import_opt('max_events_per_stack', 'heat.common.config')
+
 from heat.openstack.common.gettextutils import _
 
 from heat.common import crypt
@@ -339,7 +342,35 @@
     return _query_all_by_stack(context, stack_id).count()
 
 
+def _delete_event_rows(context, stack_id, limit):
+    # MySQL does not support LIMIT in subqueries,
+    # sqlite does not support JOIN in DELETE.
+    # So we must manually supply the IN() values.
+    # pgsql SHOULD work with the pure DELETE/JOIN below but that must be
+    # confirmed via integration tests.
+    query = _query_all_by_stack(context, stack_id)
+    session = _session(context)
+    if 'postgres' not in session.connection().dialect.name:
+        ids = [r.id for r in query.order_by(
+            models.Event.id).limit(limit).all()]
+        q = session.query(models.Event).filter(
+            models.Event.id.in_(ids))
+    else:
+        stmt = session.query(
+            models.Event.id).filter_by(
+                stack_id=stack_id).order_by(
+                    models.Event.id).limit(limit).subquery()
+        q = query.join(stmt, models.Event.id == stmt.c.id)
+    return q.delete(synchronize_session='fetch')
+
+
 def event_create(context, values):
+    if 'stack_id' in values and cfg.CONF.max_events_per_stack:
+        if ((event_count_all_by_stack(context, values['stack_id']) >=
+             cfg.CONF.max_events_per_stack)):
+            # prune
+            _delete_event_rows(
+                context, values['stack_id'], cfg.CONF.event_purge_batch_size)
     event_ref = models.Event()
     event_ref.update(values)
     event_ref.save(_session(context))
diff --git a/heat/tests/test_event.py b/heat/tests/test_event.py
index 5d21283f445..68d0a781708 100644
--- a/heat/tests/test_event.py
+++ b/heat/tests/test_event.py
@@ -12,6 +12,10 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo.config import cfg
+
+cfg.CONF.import_opt('event_purge_batch_size', 'heat.common.config')
+cfg.CONF.import_opt('max_events_per_stack', 'heat.common.config')
 
 import heat.db.api as db_api
 from heat.engine import parser
@@ -100,6 +104,25 @@ def test_load_given_stack_event(self):
         self.assertNotEqual(None, loaded_e.timestamp)
         self.assertEqual({'Foo': 'goo'}, loaded_e.resource_properties)
 
+    def test_store_caps_events(self):
+        cfg.CONF.set_override('event_purge_batch_size', 1)
+        cfg.CONF.set_override('max_events_per_stack', 1)
+        self.resource.resource_id_set('resource_physical_id')
+
+        e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
+                        'alabama', self.resource.properties,
+                        self.resource.name, self.resource.type())
+        e.store()
+        self.assertEquals(1, len(db_api.event_get_all_by_stack(self.ctx,
+                                                               self.stack.id)))
+        e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
+                        'arizona', self.resource.properties,
+                        self.resource.name, self.resource.type())
+        e.store()
+        events = db_api.event_get_all_by_stack(self.ctx, self.stack.id)
+        self.assertEquals(1, len(events))
+        self.assertEqual('arizona', events[0].physical_resource_id)
+
     def test_identifier(self):
         e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
                         'wibble', self.resource.properties,
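
A rough operator-facing sketch of how the two new options interact. The
values below are illustrative only, not defaults or recommendations from
this patch, and the [DEFAULT] section is an assumption based on where
max_stacks_per_tenant and the other service options appear in
heat.conf.sample:

    [DEFAULT]
    # Keep at most 2000 events per stack. Once a stack is at the cap,
    # the next event insert first deletes the 50 oldest events for that
    # stack, then stores the new event.
    max_events_per_stack=2000
    event_purge_batch_size=50

    # Setting the cap to 0 restores the old unbounded behavior:
    # max_events_per_stack=0

With the defaults (max_events_per_stack=1000, event_purge_batch_size=10),
a stack that has reached the cap triggers a purge roughly once every ten
event inserts; raising event_purge_batch_size makes purges less frequent
at the cost of dropping more history each time.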