Merge "Provide config option to cap events per stack"
This commit is contained in:
commit
14cb54b519
|
@ -39,6 +39,17 @@
|
||||||
# one time. (integer value)
|
# one time. (integer value)
|
||||||
#max_stacks_per_tenant=100
|
#max_stacks_per_tenant=100
|
||||||
|
|
||||||
|
# Controls how many events will be pruned whenever a stack's
|
||||||
|
# events exceed max_events_per_stack. Set this lower to keep
|
||||||
|
# more events at the expense of more frequent purges. (integer
|
||||||
|
# value)
|
||||||
|
#event_purge_batch_size=10
|
||||||
|
|
||||||
|
# Maximum events that will be available per stack. Older
|
||||||
|
# events will be deleted when this is reached. Set to 0 for
|
||||||
|
# unlimited events per stack. (integer value)
|
||||||
|
#max_events_per_stack=1000
|
||||||
|
|
||||||
# Name of the engine node. This can be an opaque identifier.It
|
# Name of the engine node. This can be an opaque identifier.It
|
||||||
# is not necessarily a hostname, FQDN, or IP address. (string
|
# is not necessarily a hostname, FQDN, or IP address. (string
|
||||||
# value)
|
# value)
|
||||||
|
|
|
@ -104,9 +104,18 @@ engine_opts = [
|
||||||
cfg.IntOpt('max_stacks_per_tenant',
|
cfg.IntOpt('max_stacks_per_tenant',
|
||||||
default=100,
|
default=100,
|
||||||
help=_('Maximum number of stacks any one tenant may have'
|
help=_('Maximum number of stacks any one tenant may have'
|
||||||
' active at one time.'))]
|
' active at one time.')),
|
||||||
|
cfg.IntOpt('event_purge_batch_size',
|
||||||
|
default=10,
|
||||||
|
help=_('Controls how many events will be pruned whenever a '
|
||||||
|
' stack\'s events exceed max_events_per_stack. Set this'
|
||||||
|
' lower to keep more events at the expense of more'
|
||||||
|
' frequent purges.')),
|
||||||
|
cfg.IntOpt('max_events_per_stack',
|
||||||
|
default=1000,
|
||||||
|
help=_('Maximum events that will be available per stack. Older'
|
||||||
|
' events will be deleted when this is reached. Set to 0'
|
||||||
|
' for unlimited events per stack.'))]
|
||||||
rpc_opts = [
|
rpc_opts = [
|
||||||
cfg.StrOpt('host',
|
cfg.StrOpt('host',
|
||||||
default=socket.gethostname(),
|
default=socket.gethostname(),
|
||||||
|
|
|
@ -17,9 +17,12 @@
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from datetime import timedelta
|
from datetime import timedelta
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
import sqlalchemy
|
import sqlalchemy
|
||||||
from sqlalchemy.orm.session import Session
|
from sqlalchemy.orm.session import Session
|
||||||
|
|
||||||
|
cfg.CONF.import_opt('max_events_per_stack', 'heat.common.config')
|
||||||
|
|
||||||
from heat.openstack.common.gettextutils import _
|
from heat.openstack.common.gettextutils import _
|
||||||
|
|
||||||
from heat.common import crypt
|
from heat.common import crypt
|
||||||
|
@ -341,7 +344,35 @@ def event_count_all_by_stack(context, stack_id):
|
||||||
return _query_all_by_stack(context, stack_id).count()
|
return _query_all_by_stack(context, stack_id).count()
|
||||||
|
|
||||||
|
|
||||||
|
def _delete_event_rows(context, stack_id, limit):
    """Delete up to *limit* of the oldest events belonging to a stack.

    MySQL does not support LIMIT in subqueries, and sqlite does not
    support JOIN in DELETE, so for those backends we must manually
    supply the IN() values in a two-step delete.  pgsql SHOULD work
    with the pure DELETE/JOIN below but that must be confirmed via
    integration tests.

    :param context: request context holding the DB session
    :param stack_id: id of the stack whose events are pruned
    :param limit: maximum number of (oldest) events to delete
    :returns: number of rows deleted
    """
    query = _query_all_by_stack(context, stack_id)
    session = _session(context)
    if 'postgres' not in session.connection().dialect.name:
        # Two-step form: fetch the oldest event ids first, then delete
        # by id, since LIMIT-in-subquery / JOIN-in-DELETE are not
        # portable across MySQL and sqlite.
        ids = [r.id for r in query.order_by(
            models.Event.id).limit(limit).all()]
        q = session.query(models.Event).filter(
            models.Event.id.in_(ids))
    else:
        # Single-statement form: DELETE joined against a LIMITed
        # subquery of the oldest ids for this stack.
        stmt = session.query(
            models.Event.id).filter_by(
                stack_id=stack_id).order_by(
                    models.Event.id).limit(limit).subquery()
        q = query.join(stmt, models.Event.id == stmt.c.id)
    return q.delete(synchronize_session='fetch')
|
||||||
|
|
||||||
|
|
||||||
def event_create(context, values):
    """Store a new event row, pruning old events if the stack is at cap.

    When ``max_events_per_stack`` is non-zero and the target stack
    already holds at least that many events, a batch of
    ``event_purge_batch_size`` of the oldest events is deleted before
    the new event is saved, keeping per-stack event storage bounded.

    :param context: request context holding the DB session
    :param values: mapping of column values for the new event; pruning
        only applies when it contains a 'stack_id'
    """
    if 'stack_id' in values and cfg.CONF.max_events_per_stack:
        if ((event_count_all_by_stack(context, values['stack_id']) >=
             cfg.CONF.max_events_per_stack)):
            # prune the oldest events to make room for this one
            _delete_event_rows(
                context, values['stack_id'], cfg.CONF.event_purge_batch_size)
    event_ref = models.Event()
    event_ref.update(values)
    event_ref.save(_session(context))
|
||||||
|
|
|
@ -12,6 +12,10 @@
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
cfg.CONF.import_opt('event_purge_batch_size', 'heat.common.config')
|
||||||
|
cfg.CONF.import_opt('max_events_per_stack', 'heat.common.config')
|
||||||
|
|
||||||
import heat.db.api as db_api
|
import heat.db.api as db_api
|
||||||
from heat.engine import parser
|
from heat.engine import parser
|
||||||
|
@ -100,6 +104,25 @@ class EventTest(HeatTestCase):
|
||||||
self.assertNotEqual(None, loaded_e.timestamp)
|
self.assertNotEqual(None, loaded_e.timestamp)
|
||||||
self.assertEqual({'Foo': 'goo'}, loaded_e.resource_properties)
|
self.assertEqual({'Foo': 'goo'}, loaded_e.resource_properties)
|
||||||
|
|
||||||
|
def test_store_caps_events(self):
    """Storing past max_events_per_stack prunes the oldest events.

    With the cap and the purge batch size both set to 1, storing a
    second event must leave exactly one event for the stack — the
    newest one.
    """
    cfg.CONF.set_override('event_purge_batch_size', 1)
    cfg.CONF.set_override('max_events_per_stack', 1)
    self.resource.resource_id_set('resource_physical_id')

    e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
                    'alabama', self.resource.properties,
                    self.resource.name, self.resource.type())
    e.store()
    # assertEqual, not the deprecated assertEquals alias.
    self.assertEqual(1, len(db_api.event_get_all_by_stack(self.ctx,
                                                          self.stack.id)))
    e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
                    'arizona', self.resource.properties,
                    self.resource.name, self.resource.type())
    e.store()
    events = db_api.event_get_all_by_stack(self.ctx, self.stack.id)
    # The older 'alabama' event was purged; only 'arizona' remains.
    self.assertEqual(1, len(events))
    self.assertEqual('arizona', events[0].physical_resource_id)
|
||||||
|
|
||||||
def test_identifier(self):
|
def test_identifier(self):
|
||||||
e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
|
e = event.Event(self.ctx, self.stack, 'TEST', 'IN_PROGRESS', 'Testing',
|
||||||
'wibble', self.resource.properties,
|
'wibble', self.resource.properties,
|
||||||
|
|
Loading…
Reference in New Issue