Change NVRAM manager and Swift APIs to accept UUID
Currently the NVRAM manager store API takes the instance object as the parameter, which by itself is fine. However, the event handler (virt/powervm/event.py) has to fetch the entire Instance object just to call the store method when responding to NVRAM events. This change makes the NvramManager and Swift APIs take either the instance UUID or the instance object as the parameter. A cache is added to the event listener to maintain a mapping from the PVM UUID to the nova instance UUID, so the instance object still does not have to be retrieved on every event.

Change-Id: Id3513cf9a7d905f772f1eb3576a3e79a6fb57eae
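In short, every NVRAM entry point now normalizes its argument up front. A minimal sketch of the idiom this change applies (illustrative only, not part of the diff; _normalize is a hypothetical name, the real code inlines the expression at each call site):

    from oslo_utils import uuidutils

    def _normalize(instance):
        # Accept either a nova Instance object or a plain UUID string.
        return instance if uuidutils.is_uuid_like(instance) else instance.uuid

The event handler's cache then maps the PowerVM UUID to the nova instance UUID, so repeated NVRAM events for the same LPAR cost only a dictionary lookup rather than an Instance fetch.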
@@ -22,7 +22,7 @@ class NoopNvramStore(api.NvramStore):
     def store(self, instance, data, force=True):
         """Store the NVRAM into the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         :param data: the NVRAM data base64 encoded string
         :param force: boolean whether an update should always be saved,
                       otherwise, check to see if it's changed.
@@ -32,7 +32,7 @@ class NoopNvramStore(api.NvramStore):
     def fetch(self, instance):
         """Fetch the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         :returns: the NVRAM data base64 encoded string
         """
         return None
@@ -40,7 +40,7 @@ class NoopNvramStore(api.NvramStore):
     def delete(self, instance):
         """Delete the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         """
         pass
@@ -50,7 +50,7 @@ class ExpNvramStore(NoopNvramStore):
     def fetch(self, instance):
         """Fetch the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         :returns: the NVRAM data base64 encoded string
         """
         # Raise exception. This is to ensure fetch causes a failure
@@ -60,7 +60,7 @@ class ExpNvramStore(NoopNvramStore):
     def delete(self, instance):
         """Delete the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         """
         # Raise exception. This is to ensure delete does not fail
         # despite an exception being raised
@@ -38,13 +38,15 @@ class TestNvramManager(test.TestCase):
             fixtures.MockPatchObject(self.fake_store, 'fetch')).mock
         self.mock_remove = self.useFixture(
             fixtures.MockPatchObject(self.fake_store, 'delete')).mock
+        self.mock_exp_remove = self.useFixture(
+            fixtures.MockPatchObject(self.fake_exp_store, 'delete')).mock

     @mock.patch('nova_powervm.virt.powervm.nvram.manager.LOG.exception')
     @mock.patch.object(vm, 'get_instance_wrapper')
     def test_store_with_exception(self, mock_get_inst, mock_log):
         mock_get_inst.side_effect = pvm_exc.HttpError(mock.Mock())
         mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock())
-        mgr.store(powervm.TEST_INST1)
+        mgr.store(powervm.TEST_INST1.uuid)
         self.assertEqual(1, mock_log.call_count)

     @mock.patch('nova_powervm.virt.powervm.nvram.manager.LOG.warning')
@@ -52,18 +54,18 @@ class TestNvramManager(test.TestCase):
     def test_store_with_not_found_exc(self, mock_get_inst, mock_log):
         mock_get_inst.side_effect = pvm_exc.HttpNotFound(mock.Mock())
         mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock())
-        mgr.store(powervm.TEST_INST1)
+        mgr.store(powervm.TEST_INST1.uuid)
         mock_log.assert_not_called()

     @mock.patch.object(vm, 'get_instance_wrapper')
     def test_manager(self, mock_get_inst):

         mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock())
-        mgr.store(powervm.TEST_INST1)
+        mgr.store(powervm.TEST_INST1.uuid)
         mgr.store(powervm.TEST_INST2)

         mgr.fetch(powervm.TEST_INST2)
-
+        mgr.fetch(powervm.TEST_INST2.uuid)
         mgr.remove(powervm.TEST_INST2)

         # Simulate a quick repeated stores of the same LPAR by poking the Q.
@@ -74,10 +76,13 @@ class TestNvramManager(test.TestCase):

         mgr.shutdown()
         self.mock_store.assert_has_calls(
-            [mock.call(powervm.TEST_INST1, mock.ANY),
-             mock.call(powervm.TEST_INST2, mock.ANY)])
-        self.mock_fetch.assert_called_with(powervm.TEST_INST2)
-        self.mock_remove.assert_called_with(powervm.TEST_INST2)
+            [mock.call(powervm.TEST_INST1.uuid, mock.ANY),
+             mock.call(powervm.TEST_INST2.uuid, mock.ANY)])
+        self.mock_fetch.assert_has_calls(
+            [mock.call(powervm.TEST_INST2.uuid)] * 2)
+        self.mock_remove.assert_called_once_with(powervm.TEST_INST2.uuid)
+
+        self.mock_remove.reset_mock()

         # Test when fetch returns an exception
         mgr_exp = manager.NvramManager(self.fake_exp_store,
@@ -86,5 +91,5 @@ class TestNvramManager(test.TestCase):
                           mgr_exp.fetch, powervm.TEST_INST2)

         # Test exception being logged but not raised during remove
-        mgr_exp.remove(powervm.TEST_INST2)
-        self.mock_remove.assert_called_with(powervm.TEST_INST2)
+        mgr_exp.remove(powervm.TEST_INST2.uuid)
+        self.mock_exp_remove.assert_called_once_with(powervm.TEST_INST2.uuid)
@@ -127,8 +127,7 @@ class TestSwiftStore(test.TestCase):
         mock_exists.return_value = True
         with mock.patch.object(self.swift_store, '_run_operation') as mock_run:
             mock_run.return_value = self._build_results(['obj'])
-            self.swift_store._store(powervm.TEST_INST1.uuid,
-                                    powervm.TEST_INST1.name, 'data')
+            self.swift_store._store(powervm.TEST_INST1.uuid, 'data')
             mock_run.assert_called_once_with('upload', 'powervm_nvram',
                                              mock.ANY, options=None)

@@ -138,7 +137,7 @@ class TestSwiftStore(test.TestCase):
             mock_run.return_value = mock_result
             self.assertRaises(api.NVRAMUploadException,
                               self.swift_store._store, powervm.TEST_INST1.uuid,
-                              powervm.TEST_INST1.name, 'data')
+                              'data')

             # Test retry upload
             mock_run.reset_mock()
@@ -149,8 +148,7 @@ class TestSwiftStore(test.TestCase):
                             'object': '6ecb1386-53ab-43da-9e04-54e986ad4a9d'}
             mock_run.side_effect = [mock_res_obj,
                                     self._build_results(['obj'])]
-            self.swift_store._store(powervm.TEST_INST1.uuid,
-                                    powervm.TEST_INST1.name, 'data')
+            self.swift_store._store(powervm.TEST_INST1.uuid, 'data')
             mock_run.assert_called_with('upload', 'powervm_nvram',
                                         mock.ANY, options=None)
             self.assertEqual(mock_run.call_count, 2)
@@ -164,8 +162,7 @@ class TestSwiftStore(test.TestCase):
         mock_exists.return_value = False
         with mock.patch.object(self.swift_store, '_run_operation') as mock_run:
             mock_run.return_value = self._build_results(['obj'])
-            self.swift_store._store(powervm.TEST_INST1.uuid,
-                                    powervm.TEST_INST1.name, 'data')
+            self.swift_store._store(powervm.TEST_INST1.uuid, 'data')
             mock_run.assert_called_once_with(
                 'upload', 'powervm_nvram', mock.ANY,
                 options={'leave_segments': True})
@@ -179,8 +176,7 @@ class TestSwiftStore(test.TestCase):
                             'object': '6ecb1386-53ab-43da-9e04-54e986ad4a9d'}
             mock_run.side_effect = [mock_res_obj,
                                     self._build_results(['obj'])]
-            self.swift_store._store(powervm.TEST_INST1.uuid,
-                                    powervm.TEST_INST1.name, 'data')
+            self.swift_store._store(powervm.TEST_INST1.uuid, 'data')
             mock_run.assert_called_with('upload', 'powervm_nvram', mock.ANY,
                                         options={'leave_segments': True})
             self.assertEqual(mock_run.call_count, 2)
@@ -192,9 +188,8 @@ class TestSwiftStore(test.TestCase):
         # Test forcing an update
         with mock.patch.object(self.swift_store, '_store') as mock_store:
             mock_exists.return_value = False
-            self.swift_store.store(powervm.TEST_INST1, 'data', force=True)
+            self.swift_store.store(powervm.TEST_INST1.uuid, 'data', force=True)
             mock_store.assert_called_once_with(powervm.TEST_INST1.uuid,
-                                               powervm.TEST_INST1.name,
                                                'data', exists=False)

         with mock.patch.object(
@@ -206,7 +201,8 @@ class TestSwiftStore(test.TestCase):
             results = self._build_results(['obj'])
             results[0]['headers'] = {'etag': data_md5_hash}
             mock_run.return_value = results
-            self.swift_store.store(powervm.TEST_INST1, 'data', force=False)
+            self.swift_store.store(powervm.TEST_INST1.uuid, 'data',
+                                   force=False)
             self.assertFalse(mock_store.called)
             mock_run.assert_called_once_with(
                 'stat', options={'long': True},
@@ -217,7 +213,7 @@ class TestSwiftStore(test.TestCase):
         with mock.patch.object(self.swift_store, '_store') as mock_store:
             self.swift_store.store_slot_map("test_slot", 'data')
             mock_store.assert_called_once_with(
-                'test_slot', 'test_slot', 'data')
+                'test_slot', 'data')

     @mock.patch('os.remove')
     @mock.patch('tempfile.NamedTemporaryFile')
@@ -32,7 +32,7 @@ class TestVMTasks(test.TestCase):
     def setUp(self):
         super(TestVMTasks, self).setUp()
         self.apt = mock.Mock()
-        self.instance = mock.Mock()
+        self.instance = mock.Mock(uuid='fake-uuid')

     @mock.patch('pypowervm.utils.transaction.FeedTask')
     @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task')
@@ -59,6 +59,33 @@ class TestPowerVMNovaEventHandler(test.TestCase):
         self.mock_driver = mock.Mock()
         self.handler = event.PowerVMNovaEventHandler(self.mock_driver)

+    @mock.patch('nova_powervm.virt.powervm.event._get_instance')
+    def test_get_inst_uuid(self, mock_get_instance):
+        fake_inst1 = mock.Mock(uuid='uuid1')
+        fake_inst2 = mock.Mock(uuid='uuid2')
+        mock_get_instance.side_effect = lambda i, u: {
+            'fake_pvm_uuid1': fake_inst1,
+            'fake_pvm_uuid2': fake_inst2}.get(u)
+
+        self.assertEqual(
+            (fake_inst1, 'uuid1'),
+            self.handler._get_inst_uuid(fake_inst1, 'fake_pvm_uuid1'))
+        self.assertEqual(
+            (fake_inst2, 'uuid2'),
+            self.handler._get_inst_uuid(fake_inst2, 'fake_pvm_uuid2'))
+        self.assertEqual(
+            (None, 'uuid1'),
+            self.handler._get_inst_uuid(None, 'fake_pvm_uuid1'))
+        self.assertEqual(
+            (fake_inst2, 'uuid2'),
+            self.handler._get_inst_uuid(fake_inst2, 'fake_pvm_uuid2'))
+        self.assertEqual(
+            (fake_inst1, 'uuid1'),
+            self.handler._get_inst_uuid(fake_inst1, 'fake_pvm_uuid1'))
+        mock_get_instance.assert_has_calls(
+            [mock.call(fake_inst1, 'fake_pvm_uuid1'),
+             mock.call(fake_inst2, 'fake_pvm_uuid2')])
+
     @mock.patch('nova_powervm.virt.powervm.event._get_instance')
     def test_handle_inst_event(self, mock_get_instance):
         # If no event we care about, or NVRAM but no nvram_mgr, nothing happens
@@ -89,24 +116,48 @@ class TestPowerVMNovaEventHandler(test.TestCase):
         self.mock_lceh_process.assert_not_called()

         mock_get_instance.reset_mock()
-        mock_get_instance.return_value = 'inst'
+        fake_inst = mock.Mock(uuid='fake-uuid')
+        mock_get_instance.return_value = fake_inst

         # NVRAM only - no PartitionState handling, instance is returned
-        self.assertEqual('inst', self.handler._handle_inst_event(
+        self.assertEqual(fake_inst, self.handler._handle_inst_event(
             None, 'uuid', ['NVRAM', 'baz']))
         mock_get_instance.assert_called_once_with(None, 'uuid')
-        self.mock_driver.nvram_mgr.store.assert_called_once_with('inst')
+        self.mock_driver.nvram_mgr.store.assert_called_once_with('fake-uuid')
         self.mock_lceh_process.assert_not_called()

         mock_get_instance.reset_mock()
+        self.mock_driver.nvram_mgr.store.reset_mock()
+        self.handler._uuid_cache.clear()

         # Both event types
-        self.assertEqual('inst', self.handler._handle_inst_event(
+        self.assertEqual(fake_inst, self.handler._handle_inst_event(
             None, 'uuid', ['PartitionState', 'NVRAM']))
         mock_get_instance.assert_called_once_with(None, 'uuid')
-        self.mock_driver.nvram_mgr.store.assert_called_once_with('inst')
-        self.mock_lceh_process.assert_called_once_with('inst', 'uuid')
+        self.mock_driver.nvram_mgr.store.assert_called_once_with('fake-uuid')
+        self.mock_lceh_process.assert_called_once_with(fake_inst, 'uuid')
+
+        mock_get_instance.reset_mock()
+        self.mock_driver.nvram_mgr.store.reset_mock()
+        self.handler._uuid_cache.clear()
+
+        # Handle multiple NVRAM and PartitionState events
+        self.assertEqual(fake_inst, self.handler._handle_inst_event(
+            None, 'uuid', ['NVRAM']))
+        self.assertEqual(None, self.handler._handle_inst_event(
+            None, 'uuid', ['NVRAM']))
+        self.assertEqual(None, self.handler._handle_inst_event(
+            None, 'uuid', ['PartitionState']))
+        self.assertEqual(fake_inst, self.handler._handle_inst_event(
+            fake_inst, 'uuid', ['NVRAM']))
+        self.assertEqual(fake_inst, self.handler._handle_inst_event(
+            fake_inst, 'uuid', ['NVRAM', 'PartitionState']))
+        mock_get_instance.assert_called_once_with(None, 'uuid')
+        self.mock_driver.nvram_mgr.store.assert_has_calls(
+            [mock.call('fake-uuid')] * 4)
+        self.mock_lceh_process.assert_has_calls(
+            [mock.call(None, 'uuid'),
+             mock.call(fake_inst, 'uuid')])

     @mock.patch('nova_powervm.virt.powervm.event.PowerVMNovaEventHandler.'
                 '_handle_inst_event')
@@ -167,6 +218,69 @@ class TestPowerVMNovaEventHandler(test.TestCase):
             # inst1 pulled from the cache based on uuid1
             mock.call('inst1', 'uuid1', ['blah', 'PartitionState'])])

+    @mock.patch('nova_powervm.virt.powervm.event._get_instance')
+    @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
+    def test_uuid_cache(self, mock_get_rpu, mock_get_instance):
+        deluri = pvm_evt.EventType.DELETE_URI
+        moduri = pvm_evt.EventType.MODIFY_URI
+
+        fake_inst1 = mock.Mock(uuid='uuid1')
+        fake_inst2 = mock.Mock(uuid='uuid2')
+        fake_inst4 = mock.Mock(uuid='uuid4')
+        mock_get_instance.side_effect = lambda i, u: {
+            'fake_pvm_uuid1': fake_inst1,
+            'fake_pvm_uuid2': fake_inst2,
+            'fake_pvm_uuid4': fake_inst4}.get(u)
+        mock_get_rpu.side_effect = lambda d, **k: d.split('/')[1]
+
+        uuid_det = (('fake_pvm_uuid1', 'NVRAM', moduri),
+                    ('fake_pvm_uuid2', 'NVRAM', moduri),
+                    ('fake_pvm_uuid4', 'NVRAM', moduri),
+                    ('fake_pvm_uuid1', 'NVRAM', moduri),
+                    ('fake_pvm_uuid2', '', deluri),
+                    ('fake_pvm_uuid2', 'NVRAM', moduri),
+                    ('fake_pvm_uuid1', '', deluri),
+                    ('fake_pvm_uuid3', '', deluri))
+        events = [
+            mock.Mock(etype=etype, data='LogicalPartition/' + uuid,
+                      detail=detail) for uuid, detail, etype in uuid_det]
+        self.handler.process(events[0:4])
+        mock_get_instance.assert_has_calls([
+            mock.call(None, 'fake_pvm_uuid1'),
+            mock.call(None, 'fake_pvm_uuid2'),
+            mock.call(None, 'fake_pvm_uuid4')])
+        self.assertEqual({
+            'fake_pvm_uuid1': 'uuid1',
+            'fake_pvm_uuid2': 'uuid2',
+            'fake_pvm_uuid4': 'uuid4'}, self.handler._uuid_cache)
+
+        mock_get_instance.reset_mock()
+
+        # Test the cache with a second process call
+        self.handler.process(events[4:7])
+        mock_get_instance.assert_has_calls([
+            mock.call(None, 'fake_pvm_uuid2')])
+        self.assertEqual({
+            'fake_pvm_uuid2': 'uuid2',
+            'fake_pvm_uuid4': 'uuid4'}, self.handler._uuid_cache)
+
+        mock_get_instance.reset_mock()
+
+        # Make sure a delete to a non-cached UUID doesn't blow up
+        self.handler.process([events[7]])
+        mock_get_instance.assert_not_called()
+
+        mock_get_rpu.reset_mock()
+        mock_get_instance.reset_mock()
+
+        clear_events = [mock.Mock(etype=pvm_evt.EventType.NEW_CLIENT),
+                        mock.Mock(etype=pvm_evt.EventType.CACHE_CLEARED)]
+        # This should clear the cache
+        self.handler.process(clear_events)
+        self.assertEqual(dict(), self.handler._uuid_cache)
+        self.assertEqual(0, mock_get_rpu.call_count)
+        mock_get_instance.assert_not_called()
+

 class TestPowerVMLifecycleEventHandler(test.TestCase):
     def setUp(self):
@@ -675,6 +675,18 @@ class TestVM(test.TestCase):
                          str(mock_pwroff.call_args[1]['add_parms']))
         mock_lock.assert_called_once_with('power_uuid')

+    def test_get_pvm_uuid(self):
+
+        nova_uuid = "dbbb48f1-2406-4019-98af-1c16d3df0204"
+        # Test with uuid string
+        self.assertEqual('5BBB48F1-2406-4019-98AF-1C16D3DF0204',
+                         vm.get_pvm_uuid(nova_uuid))
+
+        mock_inst = mock.Mock(uuid=nova_uuid)
+        # Test with instance object
+        self.assertEqual('5BBB48F1-2406-4019-98AF-1C16D3DF0204',
+                         vm.get_pvm_uuid(mock_inst))
+
     @mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
     @mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
     def test_instance_exists(self, mock_getvmqp, mock_getuuid):
@@ -79,6 +79,23 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
     def __init__(self, driver):
         self._driver = driver
         self._lifecycle_handler = PowerVMLifecycleEventHandler(self._driver)
+        self._uuid_cache = {}

+    def _get_inst_uuid(self, inst, pvm_uuid):
+        """Retrieve instance UUID from cache keyed by the PVM UUID.
+
+        :param inst: the instance object.
+        :param pvm_uuid: the PowerVM uuid of the vm
+        :return inst: the instance object.
+        :return inst_uuid: The nova instance uuid
+        """
+        inst_uuid = self._uuid_cache.get(pvm_uuid)
+        if not inst_uuid:
+            inst = _get_instance(inst, pvm_uuid)
+            inst_uuid = inst.uuid if inst else None
+            if inst_uuid:
+                self._uuid_cache[pvm_uuid] = inst_uuid
+        return inst, inst_uuid
+
     def _handle_inst_event(self, inst, pvm_uuid, details):
         """Handle an instance event.
@@ -95,13 +112,16 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
         # If the NVRAM has changed for this instance and a store is configured.
         if 'NVRAM' in details and self._driver.nvram_mgr is not None:
             # Schedule the NVRAM for the instance to be stored.
-            inst = _get_instance(inst, pvm_uuid)
-            if inst is None:
+            # We'll need to fetch the instance object in the event we don't
+            # have the object and the UUID isn't cached. By updating the
+            # object reference here and returning it the process method will
+            # save the object in its cache.
+            inst, inst_uuid = self._get_inst_uuid(inst, pvm_uuid)
+            if inst_uuid is None:
                 return None

-            LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid,
-                      instance=inst)
-            self._driver.nvram_mgr.store(inst)
+            LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid)
+            self._driver.nvram_mgr.store(inst_uuid)

         # If the state of the vm changed see if it should be handled
         if 'PartitionState' in details:
@@ -120,6 +140,7 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
             if pvm_event.etype in (pvm_evt.EventType.NEW_CLIENT,
                                    pvm_evt.EventType.CACHE_CLEARED):
                 # TODO(efried): Should we pull and check all the LPARs?
+                self._uuid_cache.clear()
                 continue
             # See if this uri (from data) ends with a PowerVM UUID.
             pvm_uuid = pvm_util.get_req_path_uuid(
@@ -130,6 +151,13 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
             if not pvm_event.data.endswith('LogicalPartition/' + pvm_uuid):
                 continue

+            # Are we deleting? Meaning we need to clear the cache entry.
+            if pvm_event.etype == pvm_evt.EventType.DELETE_URI:
+                try:
+                    del self._uuid_cache[pvm_uuid]
+                except KeyError:
+                    pass
+                continue
             # Pull all the pieces of the event.
             details = (pvm_event.detail.split(',') if pvm_event.detail
                        else [])
@@ -47,7 +47,7 @@ class NvramStore(object):
     def store(self, instance, data, force=True):
         """Store the NVRAM into the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         :param data: the NVRAM data base64 encoded string
         :param force: boolean whether an update should always be saved,
                       otherwise, check to see if it's changed.
@@ -57,7 +57,7 @@ class NvramStore(object):
     def fetch(self, instance):
         """Fetch the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         :returns: the NVRAM data base64 encoded string
         """

@@ -65,5 +65,5 @@ class NvramStore(object):
     def delete(self, instance):
         """Delete the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         """
@@ -18,6 +18,7 @@ import eventlet
 from nova import utils as n_utils
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
+from oslo_utils import uuidutils
 from pypowervm import const as pvm_const
 from pypowervm import exceptions as pvm_exc
 import six
@@ -27,25 +28,25 @@ from nova_powervm.virt.powervm.nvram import api
 from nova_powervm.virt.powervm import vm

 LOG = logging.getLogger(__name__)
-LOCK_NVRAM_UPDT_LIST = 'nvram_update_list'
+LOCK_NVRAM_UPDT_SET = 'nvram_update_set'
 LOCK_NVRAM_STORE = 'nvram_update'


 class NvramManager(object):
     """The manager of the NVRAM store and fetch process.

-    This class uses two locks. One for controlling access to the list of
-    instances to update the NVRAM for and another to control actually updating
-    the NVRAM for the instance itself.
+    This class uses two locks. One for controlling access to the set of
+    instance uuids to update the NVRAM for and another to control actually
+    updating the NVRAM for the instance itself.

-    An update to the instance store should always lock the update lock first
-    and then get the list lock. There should never be a case where the list
-    lock is acquired before the update lock. This can lead to deadlock cases.
+    An update to the instance uuid store should always lock the update lock
+    first and then get the set lock. There should never be a case where the set
+    lock is acquired before the update lock. This can lead to deadlock cases.

     NVRAM events for an instance come in spurts primarily during power on and
-    off, from what has been observed so far. By using a dictionary and the
-    instance.uuid as the key, rapid requests to store the NVRAM can be
-    collapsed down into a single request (optimal).
+    off, from what has been observed so far. By using a set of instance uuids,
+    rapid requests to store the NVRAM can be collapsed down into a single
+    request (optimal).
     """

     def __init__(self, store_api, adapter, host_uuid):
@@ -60,7 +61,7 @@ class NvramManager(object):
         self._adapter = adapter
         self._host_uuid = host_uuid

-        self._update_list = {}
+        self._update_set = set()
         self._queue = eventlet.queue.LightQueue()
         self._shutdown = False
         self._update_thread = n_utils.spawn(self._update_thread)
@@ -72,7 +73,7 @@ class NvramManager(object):
         LOG.debug('NVRAM store manager shutting down.')
         self._shutdown = True
         # Remove all pending updates
-        self._clear_list()
+        self._clear_set()
         # Signal the thread to stop
         self._queue.put(None)
         self._update_thread.wait()
@@ -80,100 +81,115 @@ class NvramManager(object):
     def store(self, instance, immediate=False):
         """Store the NVRAM for an instance.

-        :param instance: The instance to store the NVRAM for.
+        :param instance: The instance UUID OR instance object of the instance
+                         to store the NVRAM for.
         :param immediate: Force the update to take place immediately.
                           Otherwise, the request is queued for asynchronous
                           update.
         """
+        inst_uuid = (instance if
+                     uuidutils.is_uuid_like(instance) else instance.uuid)
         if immediate:
-            self._update_nvram(instance=instance)
+            self._update_nvram(instance_uuid=inst_uuid)
         else:
             # Add it to the list to update
-            self._add_to_list(instance)
+            self._add_to_set(inst_uuid)
             # Trigger the thread
-            self._queue.put(instance.uuid, block=False)
+            self._queue.put(inst_uuid, block=False)
             # Sleep so the thread gets a chance to run
             time.sleep(0)

     def fetch(self, instance):
         """Fetch the NVRAM for an instance.

-        :param instance: The instance to fetch the NVRAM for.
+        :param instance: The instance UUID OR instance object of the instance
+                         to fetch the NVRAM for.
         :returns: The NVRAM data for the instance.
         """
+        inst_uuid = (instance if
+                     uuidutils.is_uuid_like(instance) else instance.uuid)
         try:
-            return self._api.fetch(instance)
+            return self._api.fetch(inst_uuid)
         except Exception as e:
-            LOG.exception('Could not update NVRAM.', instance=instance)
-            raise api.NVRAMDownloadException(instance=instance.name,
+            LOG.exception(('Could not fetch NVRAM for instance with UUID %s.'),
+                          inst_uuid)
+            raise api.NVRAMDownloadException(instance=inst_uuid,
                                              reason=six.text_type(e))

     @lockutils.synchronized(LOCK_NVRAM_STORE)
     def remove(self, instance):
         """Remove the stored NVRAM for an instance.

-        :param instance: The instance for which the NVRAM will be removed.
+        :param instance: The nova instance object OR instance UUID.
         """
+        inst_uuid = (instance if
+                     uuidutils.is_uuid_like(instance) else instance.uuid)
         # Remove any pending updates
-        self._pop_from_list(uuid=instance.uuid)
+        self._pop_from_set(uuid=inst_uuid)
         # Remove it from the store
         try:
-            self._api.delete(instance)
+            self._api.delete(inst_uuid)
         except Exception:
             # Delete exceptions should not end the operation
-            LOG.exception('Could not delete NVRAM.', instance=instance)
+            LOG.exception(('Could not delete NVRAM for instance with UUID '
+                           '%s.'), inst_uuid)

-    @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
-    def _add_to_list(self, instance):
-        """Add an instance to the list of instances to store the NVRAM."""
-        self._update_list[instance.uuid] = instance
+    @lockutils.synchronized(LOCK_NVRAM_UPDT_SET)
+    def _add_to_set(self, instance_uuid):
+        """Add an instance uuid to the set of uuids to store the NVRAM."""
+        self._update_set.add(instance_uuid)

-    @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
-    def _pop_from_list(self, uuid=None):
-        """Pop an instance off the list of instance to update.
+    @lockutils.synchronized(LOCK_NVRAM_UPDT_SET)
+    def _pop_from_set(self, uuid=None):
+        """Pop an instance uuid off the set of instances to update.

         :param uuid: The uuid of the instance to update or if not specified
-                     pull the next instance off the list.
-        returns: The uuid and instance.
+                     pull the next instance uuid off the set.
+        :returns: The instance uuid.
         """
         try:
             if uuid is None:
-                return self._update_list.popitem()
+                return self._update_set.pop()
             else:
-                return self._update_list.pop(uuid)
+                self._update_set.remove(uuid)
+                return uuid
         except KeyError:
-            return None, None
+            return None

-    @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
-    def _clear_list(self):
-        """Clear the list of instance to store NVRAM for."""
-        self._update_list.clear()
+    @lockutils.synchronized(LOCK_NVRAM_UPDT_SET)
+    def _clear_set(self):
+        """Clear the set of instance uuids to store NVRAM for."""
+        self._update_set.clear()

     @lockutils.synchronized(LOCK_NVRAM_STORE)
-    def _update_nvram(self, instance=None):
+    def _update_nvram(self, instance_uuid=None):
         """Perform an update of NVRAM for instance.

-        :param instance: The instance to update or if not specified pull the
-                         next one off the list to update.
+        :param instance_uuid: The instance uuid of the instance to update or if
+                              not specified pull the next one off the set to
+                              update.
         """
-        if instance is None:
-            uuid, instance = self._pop_from_list()
-            if uuid is None:
+        if instance_uuid is None:
+            instance_uuid = self._pop_from_set()
+            if instance_uuid is None:
                 return
         else:
             # Remove any pending updates
-            self._pop_from_list(uuid=instance.uuid)
+            self._pop_from_set(uuid=instance_uuid)

         try:
-            LOG.debug('Updating NVRAM.', instance=instance)
+            LOG.debug('Updating NVRAM for instance with uuid: %s',
+                      instance_uuid)
             data = vm.get_instance_wrapper(
-                self._adapter, instance, xag=[pvm_const.XAG.NVRAM]).nvram
-            LOG.debug('NVRAM for instance: %s', data, instance=instance)
+                self._adapter, instance_uuid, xag=[pvm_const.XAG.NVRAM]).nvram
+            LOG.debug('NVRAM for instance with uuid %(uuid)s: %(data)s',
+                      {'uuid': instance_uuid, 'data': data})
             if data is not None:
-                self._api.store(instance, data)
+                self._api.store(instance_uuid, data)
         except pvm_exc.Error:
             # Update exceptions should not end the operation.
-            LOG.exception('Could not update NVRAM.', instance=instance)
+            LOG.exception('Could not update NVRAM for instance with uuid %s.',
+                          instance_uuid)

     def _update_thread(self):
         """The thread that is charged with updating the NVRAM store."""
@@ -30,6 +30,7 @@ from nova_powervm.virt.powervm.nvram import api
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
 from oslo_utils import excutils
+from oslo_utils import uuidutils
 from swiftclient import exceptions as swft_exc
 from swiftclient import service as swft_srv

@@ -124,11 +125,10 @@ class SwiftNvramStore(api.NvramStore):
                                           container=container)
         return self._get_name_from_listing(results)

-    def _store(self, inst_key, inst_name, data, exists=None):
+    def _store(self, inst_key, data, exists=None):
        """Store the NVRAM into the storage service.

         :param inst_key: The key by which to store the data in the repository.
-        :param inst_name: The name of the instance.
         :param data: the NVRAM data base64 encoded string
         :param exists: (Optional, Default: None) If specified, tells the upload
                        whether or not the object exists. Should be a boolean
@@ -185,7 +185,7 @@ class SwiftNvramStore(api.NvramStore):
                                 'token. Retrying upload.')
                     return True
                 # The upload failed.
-                raise api.NVRAMUploadException(instance=inst_name,
+                raise api.NVRAMUploadException(instance=inst_key,
                                                reason=result)
             return False
         try:
@@ -194,23 +194,25 @@ class SwiftNvramStore(api.NvramStore):
             # The upload failed.
             reason = (_('Unable to store NVRAM after %d attempts') %
                       re.last_attempt.attempt_number)
-            raise api.NVRAMUploadException(instance=inst_name, reason=reason)
+            raise api.NVRAMUploadException(instance=inst_key, reason=reason)

     @lockutils.synchronized('nvram')
     def store(self, instance, data, force=True):
         """Store the NVRAM into the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         :param data: the NVRAM data base64 encoded string
         :param force: boolean whether an update should always be saved,
                       otherwise, check to see if it's changed.
         """
-        exists = self._exists(instance.uuid)
+        inst_uuid = (instance if
+                     uuidutils.is_uuid_like(instance) else instance.uuid)
+        exists = self._exists(inst_uuid)
         if not force and exists:
             # See if the entry exists and has not changed.
             results = self._run_operation('stat', options={'long': True},
                                           container=self.container,
-                                          objects=[instance.uuid])
+                                          objects=[inst_uuid])
             result = results[0]
             if result['success']:
                 existing_hash = result['headers']['etag']
@@ -218,11 +220,12 @@ class SwiftNvramStore(api.NvramStore):
                     data = data.encode('ascii')
                 md5 = hashlib.md5(data).hexdigest()
                 if existing_hash == md5:
-                    LOG.info('NVRAM has not changed.', instance=instance)
+                    LOG.info(('NVRAM has not changed for instance with '
+                              'UUID %s.'), inst_uuid)
                     return

-        self._store(instance.uuid, instance.name, data, exists=exists)
-        LOG.debug('NVRAM updated', instance=instance)
+        self._store(inst_uuid, data, exists=exists)
+        LOG.debug('NVRAM updated for instance with UUID %s', inst_uuid)

     def store_slot_map(self, inst_key, data):
         """Store the Slot Map to Swift.
@@ -230,7 +233,7 @@ class SwiftNvramStore(api.NvramStore):
         :param inst_key: The instance key to use for the storage operation.
         :param data: The data of the object to store. This should be a string.
         """
-        self._store(inst_key, inst_key, data)
+        self._store(inst_key, data)

     def fetch_slot_map(self, inst_key):
         """Fetch the Slot Map object.
@@ -243,12 +246,14 @@ class SwiftNvramStore(api.NvramStore):
     def fetch(self, instance):
         """Fetch the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object or instance UUID.
         :returns: the NVRAM data base64 encoded string
         """
-        data, result = self._fetch(instance.uuid)
+        inst_uuid = (instance if
+                     uuidutils.is_uuid_like(instance) else instance.uuid)
+        data, result = self._fetch(inst_uuid)
         if not data:
-            raise api.NVRAMDownloadException(instance=instance.name,
+            raise api.NVRAMDownloadException(instance=inst_uuid,
                                              reason=result)
         return data

@@ -304,12 +309,15 @@ class SwiftNvramStore(api.NvramStore):
     def delete(self, instance):
         """Delete the NVRAM from the storage service.

-        :param instance: instance object
+        :param instance: The nova instance object OR instance UUID.
         """
+        inst_uuid = (instance if
+                     uuidutils.is_uuid_like(instance) else instance.uuid)
         for result in self._run_operation('delete', container=self.container,
-                                          objects=[instance.uuid]):
+                                          objects=[inst_uuid]):

-            LOG.debug('Delete result: %s', str(result), instance=instance)
+            LOG.debug('Delete result for instance with UUID %(inst_uuid)s: '
+                      '%(res)s', {'inst_uuid': inst_uuid, 'res': result})
             if not result['success']:
-                raise api.NVRAMDeleteException(instance=instance.name,
+                raise api.NVRAMDeleteException(instance=inst_uuid,
                                                reason=result)
@@ -17,6 +17,7 @@
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
 from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
 import re
 import six

@@ -459,7 +460,7 @@ def get_instance_wrapper(adapter, instance, xag=None):
     """Get the LPAR wrapper for a given Nova instance.

     :param adapter: The adapter for the pypowervm API
-    :param instance: The nova instance.
+    :param instance: The nova instance OR its instance uuid.
     :param xag: The pypowervm XAG to be used on the read request
     :return: The pypowervm logical_partition wrapper.
     """
@@ -738,10 +739,12 @@ def get_pvm_uuid(instance):
     algorithm against the instance's uuid to convert it to the PowerVM
     UUID.

-    :param instance: nova.objects.instance.Instance
+    :param instance: nova.objects.instance.Instance OR the OpenStack instance
+                     uuid.
     :return: pvm_uuid.
     """
-    return pvm_uuid.convert_uuid_to_pvm(instance.uuid).upper()
+    inst_uuid = instance if uuidutils.is_uuid_like(instance) else instance.uuid
+    return pvm_uuid.convert_uuid_to_pvm(inst_uuid).upper()


 def _uuid_set_high_bit(pvm_uuid):