Performance improvements for Lifecycle events

Implement various performance improvements in the event handler.

- Since get_instance is expensive, delay it as long as possible (see #2
  in the bug report).  Only retrieve the instance right before we're
  going to use it.

- Delay all PartitionState events (see #3 in the bug report).

- Skip PartitionState-driven events entirely if nova is in the middle of
  an operation, since nova is already aware of the appropriate state
  changes.

- Only retrieve the admin context once, and cache it.

We keep the instance cache (see #1 in the bug report) since scale
testing showed it was indeed being used a nontrivial amount of the time.
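
The core of the change is a delay-and-collapse pattern plus lazy, cached lookups. Below is a rough standalone sketch of that pattern, not the code in this commit: it substitutes threading.Timer for eventlet's greenthread.spawn_after, and BUSY_TASK_STATES, _lookup_instance and _get_admin_context are illustrative stand-ins for the nova task_states set, vm.get_instance and ctx.get_admin_context used in the diff below.

    import threading

    # Illustrative stand-ins only (the real code uses nova's task_states,
    # ctx.get_admin_context() and vm.get_instance()).
    BUSY_TASK_STATES = {'spawning', 'rebooting', 'powering-off'}
    _ADMIN_CONTEXT = None


    def _get_admin_context():
        # Fetch the (stand-in) admin context once and cache it.
        global _ADMIN_CONTEXT
        if _ADMIN_CONTEXT is None:
            _ADMIN_CONTEXT = object()
        return _ADMIN_CONTEXT


    def _lookup_instance(context, pvm_uuid):
        # Stand-in for the expensive instance lookup; only called if needed.
        return {'uuid': pvm_uuid, 'task_state': None}


    class DelayedLifecycleEvents(object):
        """Collapse a burst of PartitionState events into one delayed emission."""

        def __init__(self, emit, delay=15):
            self._emit = emit      # callback that actually emits the event
            self._delay = delay
            self._pending = {}     # pvm_uuid -> Timer

        def process(self, inst, pvm_uuid):
            # Only the last event in a burst survives: cancel any pending timer.
            old = self._pending.pop(pvm_uuid, None)
            if old is not None:
                old.cancel()
            timer = threading.Timer(self._delay, self._fire, (pvm_uuid, inst))
            self._pending[pvm_uuid] = timer
            timer.start()

        def _fire(self, pvm_uuid, inst):
            self._pending.pop(pvm_uuid, None)
            if inst is None:
                # Deferred until we know we actually need the instance.
                inst = _lookup_instance(_get_admin_context(), pvm_uuid)
            if inst is None or inst['task_state'] in BUSY_TASK_STATES:
                # nova is already driving this state change; skip the event.
                return
            self._emit(pvm_uuid, inst)


    if __name__ == '__main__':
        handler = DelayedLifecycleEvents(print, delay=0.1)
        for _ in range(3):                   # a burst of PartitionState events
            handler.process(None, 'lpar-1')  # only the last one is emitted

Cancelling the pending timer on every new PartitionState event means a burst of transient states yields at most one LifecycleEvent, and the expensive instance lookup happens only if that one event actually fires.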

Change-Id: I1f1634215b4c269842584c59f2c14c119c282b7e
Closes-Bug: #1694784
Eric Fried 2017-06-01 14:10:48 -05:00
parent 484eaee78c
commit db759ce515
2 changed files with 351 additions and 243 deletions

File 1 of 2: unit tests for nova_powervm.virt.powervm.event

@@ -15,68 +15,157 @@
# under the License.
#

import mock

from nova.compute import power_state
from nova import exception
from nova import test
from pypowervm.wrappers import event as pvm_evt

from nova_powervm.virt.powervm import event


class TestGetInstance(test.TestCase):
    @mock.patch('nova.context.get_admin_context')
    @mock.patch('nova_powervm.virt.powervm.vm.get_instance')
    def test_get_instance(self, mock_get_inst, mock_get_context):
        # If instance provided, vm.get_instance not called
        self.assertEqual('inst', event._get_instance('inst', 'uuid'))
        mock_get_inst.assert_not_called()
        # Note that we can only guarantee get_admin_context wasn't called
        # because _get_instance is mocked everywhere else in this suite.
        # Otherwise it could run from another test case executing in parallel.
        mock_get_context.assert_not_called()

        # If instance not provided, vm.get_instance is called
        mock_get_inst.return_value = 'inst2'
        for _ in range(2):
            # Doing it the second time doesn't call get_admin_context() again.
            self.assertEqual('inst2', event._get_instance(None, 'uuid'))
            mock_get_context.assert_called_once_with()
            mock_get_inst.assert_called_once_with(
                mock_get_context.return_value, 'uuid')
            mock_get_inst.reset_mock()
            # Don't reset mock_get_context


class TestPowerVMNovaEventHandler(test.TestCase):
    def setUp(self):
        super(TestPowerVMNovaEventHandler, self).setUp()
        lceh_process_p = mock.patch(
            'nova_powervm.virt.powervm.event.PowerVMLifecycleEventHandler.'
            'process')
        self.addCleanup(lceh_process_p.stop)
        self.mock_lceh_process = lceh_process_p.start()
        self.mock_driver = mock.Mock()
        self.handler = event.PowerVMNovaEventHandler(self.mock_driver)

    @mock.patch('nova_powervm.virt.powervm.event._get_instance')
    def test_handle_inst_event(self, mock_get_instance):
        # If no event we care about, or NVRAM but no nvram_mgr, nothing happens
        self.mock_driver.nvram_mgr = None
        for dets in ([], ['foo', 'bar', 'baz'], ['NVRAM']):
            self.assertEqual('inst', self.handler._handle_inst_event(
                'inst', 'uuid', dets))
        mock_get_instance.assert_not_called()
        self.mock_lceh_process.assert_not_called()

        self.mock_driver.nvram_mgr = mock.Mock()

        # PartitionState only: no NVRAM handling, and inst is passed through.
        self.assertEqual('inst', self.handler._handle_inst_event(
            'inst', 'uuid', ['foo', 'PartitionState', 'bar']))
        mock_get_instance.assert_not_called()
        self.mock_driver.nvram_mgr.store.assert_not_called()
        self.mock_lceh_process.assert_called_once_with('inst', 'uuid')

        self.mock_lceh_process.reset_mock()

        # No instance; nothing happens (we skip PartitionState handling too)
        mock_get_instance.return_value = None
        self.assertIsNone(self.handler._handle_inst_event(
            'inst', 'uuid', ['NVRAM', 'PartitionState']))
        mock_get_instance.assert_called_once_with('inst', 'uuid')
        self.mock_driver.nvram_mgr.store.assert_not_called()
        self.mock_lceh_process.assert_not_called()

        mock_get_instance.reset_mock()
        mock_get_instance.return_value = 'inst'

        # NVRAM only - no PartitionState handling, instance is returned
        self.assertEqual('inst', self.handler._handle_inst_event(
            None, 'uuid', ['NVRAM', 'baz']))
        mock_get_instance.assert_called_once_with(None, 'uuid')
        self.mock_driver.nvram_mgr.store.assert_called_once_with('inst')
        self.mock_lceh_process.assert_not_called()

        mock_get_instance.reset_mock()
        self.mock_driver.nvram_mgr.store.reset_mock()

        # Both event types
        self.assertEqual('inst', self.handler._handle_inst_event(
            None, 'uuid', ['PartitionState', 'NVRAM']))
        mock_get_instance.assert_called_once_with(None, 'uuid')
        self.mock_driver.nvram_mgr.store.assert_called_once_with('inst')
        self.mock_lceh_process.assert_called_once_with('inst', 'uuid')

    @mock.patch('nova_powervm.virt.powervm.event.PowerVMNovaEventHandler.'
                '_handle_inst_event')
    @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
    def test_process(self, mock_get_rpu, mock_handle):
        # NEW_CLIENT/CACHE_CLEARED events are ignored
        events = [mock.Mock(etype=pvm_evt.EventType.NEW_CLIENT),
                  mock.Mock(etype=pvm_evt.EventType.CACHE_CLEARED)]
        self.handler.process(events)
        self.assertEqual(0, mock_get_rpu.call_count)
        mock_handle.assert_not_called()

        moduri = pvm_evt.EventType.MODIFY_URI
        # If get_req_path_uuid doesn't find a UUID, or not a LogicalPartition
        # URI, or details is empty, or has no actions we care about, no action
        # is taken.
        mock_get_rpu.side_effect = [None, 'uuid1', 'uuid2', 'uuid3']
        events = [
            mock.Mock(etype=moduri, data='foo/LogicalPartition/None',
                      details='NVRAM,PartitionState'),
            mock.Mock(etype=moduri, data='bar/VirtualIOServer/uuid1',
                      details='NVRAM,PartitionState'),
            mock.Mock(etype=moduri, data='baz/LogicalPartition/uuid2',
                      detail=''),
            mock.Mock(etype=moduri, data='blah/LogicalPartition/uuid3',
                      detail='do,not,care')]
        self.handler.process(events)
        mock_get_rpu.assert_has_calls(
            [mock.call(uri, preserve_case=True)
             for uri in ('bar/VirtualIOServer/uuid1',
                         'baz/LogicalPartition/uuid2',
                         'blah/LogicalPartition/uuid3')])
        mock_handle.assert_not_called()

        mock_get_rpu.reset_mock()

        # The stars align, and we handle some events.
        uuid_det = (('uuid1', 'NVRAM'),
                    ('uuid2', 'this,one,ignored'),
                    ('uuid3', 'PartitionState,baz,NVRAM'),
                    # Repeat uuid1 to test the cache
                    ('uuid1', 'blah,PartitionState'),
                    ('uuid5', 'also,ignored'))
        mock_get_rpu.side_effect = [ud[0] for ud in uuid_det]
        events = [
            mock.Mock(etype=moduri, data='LogicalPartition/' + uuid,
                      detail=detail) for uuid, detail in uuid_det]
        # Set up _handle_inst_event to test the cache and the exception path
        mock_handle.side_effect = ['inst1', None, ValueError]
        # Run it!
        self.handler.process(events)
        mock_get_rpu.assert_has_calls(
            [mock.call(uri, preserve_case=True) for uri in
             ('LogicalPartition/' + ud[0] for ud in uuid_det)])
        mock_handle.assert_has_calls(
            [mock.call(None, 'uuid1', ['NVRAM']),
             mock.call(None, 'uuid3', ['PartitionState', 'baz', 'NVRAM']),
             # inst1 pulled from the cache based on uuid1
             mock.call('inst1', 'uuid1', ['blah', 'PartitionState'])])


class TestPowerVMLifecycleEventHandler(test.TestCase):
@@ -85,109 +174,106 @@ class TestPowerVMLifecycleEventHandler(test.TestCase):
        self.mock_driver = mock.MagicMock()
        self.handler = event.PowerVMLifecycleEventHandler(self.mock_driver)

    @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
    @mock.patch('nova_powervm.virt.powervm.event._get_instance')
    @mock.patch('nova_powervm.virt.powervm.vm.translate_event')
    @mock.patch('nova.virt.event.LifecycleEvent')
    def test_emit_event(self, mock_lce, mock_tx_evt, mock_get_inst, mock_qp):
        def assert_qp():
            mock_qp.assert_called_once_with(
                self.mock_driver.adapter, 'uuid', 'PartitionState')
            mock_qp.reset_mock()

        def assert_get_inst():
            mock_get_inst.assert_called_once_with('inst', 'uuid')
            mock_get_inst.reset_mock()

        # Ignore if LPAR is gone
        mock_qp.side_effect = exception.InstanceNotFound(instance_id='uuid')
        self.handler._emit_event('uuid', None)
        assert_qp()
        mock_get_inst.assert_not_called()
        mock_tx_evt.assert_not_called()
        mock_lce.assert_not_called()
        self.mock_driver.emit_event.assert_not_called()

        # Let get_vm_qp return its usual mock from now on
        mock_qp.side_effect = None

        # Ignore if instance is gone
        mock_get_inst.return_value = None
        self.handler._emit_event('uuid', 'inst')
        assert_qp()
        assert_get_inst()
        mock_tx_evt.assert_not_called()
        mock_lce.assert_not_called()
        self.mock_driver.emit_event.assert_not_called()

        # Ignore if task_state isn't one we care about
        for task_state in event._NO_EVENT_TASK_STATES:
            mock_get_inst.return_value = mock.Mock(task_state=task_state)
            self.handler._emit_event('uuid', 'inst')
            assert_qp()
            assert_get_inst()
            mock_tx_evt.assert_not_called()
            mock_lce.assert_not_called()
            self.mock_driver.emit_event.assert_not_called()

        # Task state we care about from now on
        inst = mock.Mock(task_state='scheduling',
                         power_state=power_state.RUNNING)
        mock_get_inst.return_value = inst

        # Ignore if not a transition we care about
        mock_tx_evt.return_value = None
        self.handler._emit_event('uuid', 'inst')
        assert_qp()
        assert_get_inst()
        mock_tx_evt.assert_called_once_with(
            mock_qp.return_value, power_state.RUNNING)
        mock_lce.assert_not_called()
        self.mock_driver.emit_event.assert_not_called()

        mock_tx_evt.reset_mock()

        # Good path
        mock_tx_evt.return_value = 'transition'
        self.handler._delayed_event_threads = {'uuid': 'thread1',
                                               'uuid2': 'thread2'}
        self.handler._emit_event('uuid', 'inst')
        assert_qp()
        assert_get_inst()
        mock_tx_evt.assert_called_once_with(
            mock_qp.return_value, power_state.RUNNING)
        mock_lce.assert_called_once_with(inst.uuid, 'transition')
        self.mock_driver.emit_event.assert_called_once_with(
            mock_lce.return_value)
        # The thread was removed
        self.assertEqual({'uuid2': 'thread2'},
                         self.handler._delayed_event_threads)

    @mock.patch('eventlet.greenthread.spawn_after')
    def test_process(self, mock_spawn):
        thread1 = mock.Mock()
        thread2 = mock.Mock()
        mock_spawn.side_effect = [thread1, thread2]
        # First call populates the delay queue
        self.assertEqual({}, self.handler._delayed_event_threads)
        self.handler.process(None, 'uuid')
        mock_spawn.assert_called_once_with(15, self.handler._emit_event,
                                           'uuid', None)
        self.assertEqual({'uuid': thread1},
                         self.handler._delayed_event_threads)
        thread1.cancel.assert_not_called()
        thread2.cancel.assert_not_called()

        mock_spawn.reset_mock()

        # Second call cancels the first thread and replaces it in delay queue
        self.handler.process('inst', 'uuid')
        mock_spawn.assert_called_once_with(15, self.handler._emit_event,
                                           'uuid', 'inst')
        self.assertEqual({'uuid': thread2},
                         self.handler._delayed_event_threads)
        thread1.cancel.assert_called_once_with()
        thread2.cancel.assert_not_called()

File 2 of 2: nova_powervm/virt/powervm/event.py

@@ -14,66 +14,72 @@
# License for the specific language governing permissions and limitations
# under the License.

from eventlet import greenthread

from nova.compute import power_state
from nova.compute import task_states
from nova import context as ctx
from nova import exception
from nova.virt import event
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm import adapter as pvm_apt
from pypowervm import util as pvm_util
from pypowervm.wrappers import event as pvm_evt

from nova_powervm.virt.powervm import vm

LOG = logging.getLogger(__name__)

_INST_ACTIONS_HANDLED = {'PartitionState', 'NVRAM'}

_NO_EVENT_TASK_STATES = {
    task_states.SPAWNING,
    task_states.RESIZE_MIGRATING,
    task_states.RESIZE_REVERTING,
    task_states.REBOOTING,
    task_states.REBOOTING_HARD,
    task_states.REBOOT_STARTED_HARD,
    task_states.PAUSING,
    task_states.UNPAUSING,
    task_states.SUSPENDING,
    task_states.RESUMING,
    task_states.POWERING_OFF,
    task_states.POWERING_ON,
    task_states.RESCUING,
    task_states.UNRESCUING,
    task_states.REBUILDING,
    task_states.REBUILD_SPAWNING,
    task_states.MIGRATING,
    task_states.DELETING,
    task_states.SOFT_DELETING,
    task_states.RESTORING,
    task_states.SHELVING,
    task_states.SHELVING_OFFLOADING,
    task_states.UNSHELVING,
}

_LIFECYCLE_EVT_LOCK = 'pvm_lifecycle_event'

_CONTEXT = None


def _get_instance(inst, pvm_uuid):
    global _CONTEXT
    if inst is not None:
        return inst
    with lockutils.lock('get_context_once'):
        if _CONTEXT is None:
            _CONTEXT = ctx.get_admin_context()
    LOG.debug('PowerVM Nova Event Handler: Getting inst for id %s', pvm_uuid)
    return vm.get_instance(_CONTEXT, pvm_uuid)


class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
    """Used to receive and handle events from PowerVM and convert to Nova."""
    def __init__(self, driver):
        self._driver = driver
        self._lifecycle_handler = PowerVMLifecycleEventHandler(self._driver)

    def _handle_inst_event(self, inst, pvm_uuid, details):
        """Handle an instance event.
@@ -84,19 +90,24 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
        :param inst: the instance object.
        :param pvm_uuid: the PowerVM uuid of the vm
        :param details: Parsed Details from the event
        :return inst: The nova instance, which may be None
        """
        # If the NVRAM has changed for this instance and a store is configured.
        if 'NVRAM' in details and self._driver.nvram_mgr is not None:
            # Schedule the NVRAM for the instance to be stored.
            inst = _get_instance(inst, pvm_uuid)
            if inst is None:
                return None
            LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid)
            self._driver.nvram_mgr.store(inst)

        # If the state of the vm changed see if it should be handled
        if 'PartitionState' in details:
            self._lifecycle_handler.process(inst, pvm_uuid)

        return inst

    def process(self, events):
        """Process the event that comes back from PowerVM.
@@ -105,19 +116,35 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
        inst_cache = {}
        for pvm_event in events:
            try:
                if pvm_event.etype in (pvm_evt.EventType.NEW_CLIENT,
                                       pvm_evt.EventType.CACHE_CLEARED):
                    # TODO(efried): Should we pull and check all the LPARs?
                    continue
                # See if this uri (from data) ends with a PowerVM UUID.
                pvm_uuid = pvm_util.get_req_path_uuid(
                    pvm_event.data, preserve_case=True)
                if pvm_uuid is None:
                    continue
                # Is it an instance event?
                if not pvm_event.data.endswith('LogicalPartition/' + pvm_uuid):
                    continue
                # Pull all the pieces of the event.
                details = (pvm_event.detail.split(',') if pvm_event.detail
                           else [])
                # Is it one we care about?
                if not _INST_ACTIONS_HANDLED & set(details):
                    continue
                inst_cache[pvm_event.data] = self._handle_inst_event(
                    inst_cache.get(pvm_event.data), pvm_uuid, details)
            except Exception:
                # We deliberately keep this exception clause as broad as
                # possible - we don't want *any* error to stop us from
                # attempting to process the next event.
                LOG.exception('Unable to process PowerVM event %s',
                              str(pvm_event))


class PowerVMLifecycleEventHandler(object):
@@ -136,71 +163,66 @@ class PowerVMLifecycleEventHandler(object):
    it.  Ex. Don't send a stop event up to nova if you received a start event
    shortly after it.
    """

    def __init__(self, driver):
        self._driver = driver
        self._delayed_event_threads = {}

    @lockutils.synchronized(_LIFECYCLE_EVT_LOCK)
    def _emit_event(self, pvm_uuid, inst):
        # Get the current state
        try:
            pvm_state = vm.get_vm_qp(self._driver.adapter, pvm_uuid,
                                     'PartitionState')
        except exception.InstanceNotFound:
            LOG.debug("Instance for LPAR %s was deleted while event was "
                      "delayed.", pvm_uuid)
            return

        LOG.debug('New state %s for partition %s', pvm_state, pvm_uuid,
                  instance=inst)

        inst = _get_instance(inst, pvm_uuid)
        if inst is None:
            LOG.debug("Not emitting LifecycleEvent: no instance for LPAR %s",
                      pvm_uuid)
            return

        # If we're in the middle of a nova-driven operation, no event necessary
        if inst.task_state in _NO_EVENT_TASK_STATES:
            LOG.debug("Not emitting LifecycleEvent: instance task_state is %s",
                      inst.task_state, instance=inst)
            return

        # See if it's really a change of state from what OpenStack knows
        transition = vm.translate_event(pvm_state, inst.power_state)
        if transition is None:
            LOG.debug("No LifecycleEvent necessary for pvm_state(%s) and "
                      "power_state(%s).", pvm_state,
                      power_state.STATE_MAP[inst.power_state], instance=inst)
            return

        # Log as if normal event
        lce = event.LifecycleEvent(inst.uuid, transition)
        LOG.info('Sending LifecycleEvent for instance state change to: %s',
                 pvm_state, instance=inst)
        self._driver.emit_event(lce)

        # Delete out the queue
        del self._delayed_event_threads[pvm_uuid]

    @lockutils.synchronized(_LIFECYCLE_EVT_LOCK)
    def process(self, inst, pvm_uuid):
        """Emits the event, or adds it to the queue for delayed emission.

        :param inst: The nova instance.  May be None.
        :param pvm_uuid: The PowerVM LPAR UUID.
        """
        # Cancel out the current delay event.  Can happen as it goes
        # from SHUTTING_DOWN to NOT_ACTIVATED, multiple delayed events
        # can come in at once.  Only want the last.
        if pvm_uuid in self._delayed_event_threads:
            self._delayed_event_threads[pvm_uuid].cancel()

        # Spawn in the background
        elem = greenthread.spawn_after(15, self._emit_event, pvm_uuid, inst)
        self._delayed_event_threads[pvm_uuid] = elem