Implement NVRAM store and manager

PowerVM has NVRAM that is associated with each VM on the system.
The NVRAM holds information about the VM such as which device
is the boot device. PowerVM produces an event when the NVRAM for
a VM is changed.

The NVRAM should be set on the VM during operations like cold
migration, resize to a different host and evacuation when the
VM is newly created on the destination host from an existing VM
on the source host.

This change set introduces the concept of an NVRAM store and
an NVRAM manager. The compute driver listens for the PowerVM
NVRAM change events and stores the NVRAM in the configured store.

A Swift store for the NVRAM is implemented.

Future change sets will utilize the NVRAM from the store on VM
operations and manage its life cycle. This change set does
include storing the NVRAM during migrate/resize and was included
to facilitate live testing.

Change-Id: I59be9946794f53fdcae5515cc818d4fe967986d0
This commit is contained in:
Kyle L. Henderson 2016-01-29 07:19:41 -06:00
parent bc31751f45
commit e71902d8dc
16 changed files with 902 additions and 10 deletions

View File

@ -136,8 +136,37 @@ npiv_opts = [
'\'fabric_<identifier>_port_wwpns\' key.')
]
# Options controlling remote restart support: which (if any) NVRAM store
# backend is used to persist PowerVM NVRAM data for the VMs on this host.
remote_restart_opts = [
    cfg.StrOpt('nvram_store',
               choices=['none', 'swift'], ignore_case=True,
               default='none',
               help='The NVRAM store to use to hold the PowerVM NVRAM for '
                    'virtual machines.'),
]
# Options consumed only when nvram_store is 'swift'; they describe how to
# authenticate with Keystone and which container holds the NVRAM objects.
swift_opts = [
    cfg.StrOpt('swift_container', default='powervm_nvram',
               help='The Swift container to store the PowerVM NVRAM in. This '
                    'must be configured the same value for all compute hosts.'),
    cfg.StrOpt('swift_username', default='powervm',
               help='The Swift user name to use for operations that use '
                    'the Swift store.'),
    cfg.StrOpt('swift_user_domain_name', default='powervm',
               help='The Swift domain the user is a member of.'),
    # No default: must be supplied by the deployer.
    cfg.StrOpt('swift_password', help='The password for the Swift '
               'user.'),
    cfg.StrOpt('swift_project_name', default='powervm',
               help='The Swift project.'),
    cfg.StrOpt('swift_project_domain_name', default='powervm',
               help='The Swift project domain.'),
    cfg.StrOpt('swift_auth_version', default='3', help='The Keystone API '
               'version.'),
    # No default: must be supplied by the deployer.
    cfg.StrOpt('swift_auth_url', help='The Keystone authorization url. '
               'Example: "http://keystone-hostname:5000/v3"'),
]
# All statically-registered option groups for this driver.  The diff
# rendering left both the pre- and post-image of this assignment in place,
# which is not valid Python; this is the post-image, including the new
# remote restart and Swift store options.
STATIC_OPTIONS = (powervm_opts + localdisk_opts + ssp_opts + vol_adapter_opts
                  + npiv_opts + remote_restart_opts + swift_opts)
# Dictionary where the key is the NPIV Fabric Name, and the value is a list of
# Physical WWPNs that match the key.

View File

@ -47,7 +47,8 @@ TEST_INSTANCE = {
'power_state': power_state.SHUTDOWN,
}
# Spawning-state variant of the test instance.  A distinct uuid keeps it
# separate from TEST_INSTANCE-derived objects in store/fetch tests.  The
# diff rendering left both the old and the new assignment in place; this
# is the post-image only.
TEST_INST_SPAWNING = dict(TEST_INSTANCE, task_state=task_states.SPAWNING,
                          uuid='b3c04455-a435-499d-ac81-371d2a2d334f')
TEST_INST1 = instance.Instance(**TEST_INSTANCE)
TEST_INST2 = instance.Instance(**TEST_INST_SPAWNING)

View File

@ -0,0 +1,45 @@
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova_powervm.virt.powervm.nvram import api
class NoopNvramStore(api.NvramStore):
    """An NvramStore implementation that intentionally does nothing.

    Handy as a test stand-in: every operation succeeds, but no NVRAM data
    is ever persisted and none is ever returned.
    """

    def store(self, instance, data, force=True):
        """Pretend to store the NVRAM; the data is simply discarded.

        :param instance: instance object
        :param data: the NVRAM data base64 encoded string
        :param force: boolean whether an update should always be saved,
                      otherwise, check to see if it's changed.
        """
        return None

    def fetch(self, instance):
        """Pretend to fetch the NVRAM; there is never anything stored.

        :param instance: instance object
        :returns: None, since no data is ever kept
        """
        return None

    def delete(self, instance):
        """Pretend to delete the NVRAM; there is nothing to remove.

        :param instance: instance object
        """
        return None

View File

@ -0,0 +1,56 @@
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from nova import test
import time
from nova_powervm.tests.virt import powervm
from nova_powervm.tests.virt.powervm.nvram import fake_api
from nova_powervm.virt.powervm.nvram import manager
from nova_powervm.virt.powervm import vm
class TestNvramManager(test.TestCase):
    """Tests for the NvramManager's store/fetch dispatch."""

    def setUp(self):
        super(TestNvramManager, self).setUp()
        # A no-op store whose store/fetch are mocked so calls can be
        # asserted without any real persistence backend.
        self.fake_store = fake_api.NoopNvramStore()
        self.mock_store = self.useFixture(
            fixtures.MockPatchObject(self.fake_store, 'store')).mock
        self.mock_fetch = self.useFixture(
            fixtures.MockPatchObject(self.fake_store, 'fetch')).mock

    @mock.patch.object(vm, 'get_instance_wrapper')
    def test_manager(self, mock_get_inst):
        mgr = manager.NvramManager(self.fake_store, mock.Mock(), mock.Mock())
        mgr.store(powervm.TEST_INST1)
        mgr.store(powervm.TEST_INST2)
        mgr.fetch(powervm.TEST_INST2)

        # Simulate a quick repeated stores of the same LPAR by poking the Q.
        mgr._queue.put(powervm.TEST_INST1)
        mgr._queue.put(powervm.TEST_INST1)
        mgr._queue.put(powervm.TEST_INST2)
        # Yield so the manager's update thread gets a chance to drain the
        # queue before shutdown.
        time.sleep(0)

        mgr.shutdown()
        # Both instances should have reached the backing store; the fetch
        # should have been passed straight through.
        self.mock_store.assert_has_calls(
            [mock.call(powervm.TEST_INST1, mock.ANY),
             mock.call(powervm.TEST_INST2, mock.ANY)])
        self.mock_fetch.assert_called_with(powervm.TEST_INST2)

View File

@ -0,0 +1,166 @@
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from nova import test
from swiftclient import service as swft_srv
from nova_powervm.tests.virt import powervm
from nova_powervm.virt.powervm.nvram import api
from nova_powervm.virt.powervm.nvram import swift
class TestSwiftStore(test.TestCase):
    """Tests for the Swift-backed NVRAM store."""

    def setUp(self):
        super(TestSwiftStore, self).setUp()
        # These two options have no defaults; set them so the store can be
        # constructed without raising NVRAMConfigOptionNotSet.
        self.flags(swift_password='secret', swift_auth_url='url',
                   group='powervm')
        self.swift_store = swift.SwiftNvramStore()
        self.swift_srv = self.useFixture(
            fixtures.MockPatch('swiftclient.service.SwiftService')).mock

    def test_run_operation(self):
        """_run_operation copies generator results, passes others through."""
        # NOTE(review): the second element is a set literal, not a dict --
        # presumably unintentional, but harmless for these assertions.
        fake_result = [{'key1': 'value1'}, {'2key1', '2value1'}]
        fake_result2 = fake_result[0]

        def fake_generator(alist):
            for item in alist:
                yield item

        # Address the 'list' method that should be called.
        list_op = (self.swift_srv.return_value.__enter__.
                   return_value.list)

        # Setup expected results
        list_op.return_value = fake_generator(fake_result)
        results = self.swift_store._run_operation(None, 'list', 1, x=2)
        # The service must be built with the store's computed options.
        self.swift_srv.assert_called_once_with(
            options=self.swift_store.options)
        list_op.assert_called_once_with(1, x=2)
        # Returns a copy of the results
        self.assertEqual(results, fake_result)
        self.assertNotEqual(id(results), id(fake_result))

        # Try a single result - Setup expected results
        list_op.reset_mock()
        list_op.return_value = fake_result2
        results = self.swift_store._run_operation(None, 'list', 3, x=4)
        list_op.assert_called_once_with(3, x=4)
        # Returns the actual result
        self.assertEqual(results, fake_result2)
        self.assertEqual(id(results), id(fake_result2))

        # Should raise any swift errors encountered
        list_op.side_effect = swft_srv.SwiftError('Error message.')
        self.assertRaises(swft_srv.SwiftError, self.swift_store._run_operation,
                          None, 'list', 3, x=4)

    def _build_results(self, names):
        """Build a fake Swift listing result for the given object names."""
        listing = [{'name': name} for name in names]
        return [{'success': True, 'listing': listing}]

    def test_get_name_from_listing(self):
        names = self.swift_store._get_name_from_listing(
            self._build_results(['snoopy']))
        self.assertEqual(['snoopy'], names)

    def test_get_container_names(self):
        with mock.patch.object(self.swift_store, '_run_operation') as mock_run:
            mock_run.return_value = self._build_results(['container'])
            names = self.swift_store._get_container_names()
            self.assertEqual(['container'], names)
            mock_run.assert_called_once_with(None, 'list',
                                             options={'long': True})

    def test_get_object_names(self):
        with mock.patch.object(self.swift_store, '_run_operation') as mock_run:
            mock_run.return_value = self._build_results(['obj', 'obj2'])
            names = self.swift_store._get_object_names('powervm_nvram')
            self.assertEqual(['obj', 'obj2'], names)
            mock_run.assert_called_once_with(None, 'list',
                                             container='powervm_nvram',
                                             options={'long': True})

    def test_underscore_store(self):
        """_store uploads; an unsuccessful result raises."""
        with mock.patch.object(self.swift_store, '_run_operation') as mock_run:
            mock_run.return_value = self._build_results(['obj'])
            self.swift_store._store(powervm.TEST_INST1, 'data')
            mock_run.assert_called_once_with(None, 'upload', 'powervm_nvram',
                                             mock.ANY)

            # Test unsuccessful upload
            mock_run.return_value[0]['success'] = False
            self.assertRaises(api.NVRAMUploadException,
                              self.swift_store._store, powervm.TEST_INST1,
                              'data')

    def test_store(self):
        # Test forcing an update
        with mock.patch.object(self.swift_store, '_store') as mock_store:
            self.swift_store.store(powervm.TEST_INST1, 'data', force=True)
            mock_store.assert_called_once_with(powervm.TEST_INST1, 'data')

        # When not forced and the stored object's etag matches the md5 of
        # the new data, no upload should take place.
        with mock.patch.object(
            self.swift_store, '_store') as mock_store, mock.patch.object(
                self.swift_store, '_run_operation') as mock_run:

            # md5 hexdigest of the literal string 'data'
            data_md5_hash = '8d777f385d3dfec8815d20f7496026dc'
            results = self._build_results(['obj'])
            results[0]['headers'] = {'etag': data_md5_hash}
            mock_run.return_value = results
            self.swift_store.store(powervm.TEST_INST1, 'data', force=False)
            self.assertFalse(mock_store.called)
            mock_run.assert_called_once_with(
                None, 'stat', options={'long': True},
                container='powervm_nvram', objects=[powervm.TEST_INST1.uuid])

    @mock.patch('os.remove')
    @mock.patch('tempfile.NamedTemporaryFile')
    def test_fetch(self, mock_tmpf, mock_rmv):
        with mock.patch('nova_powervm.virt.powervm.nvram.swift.open',
                        mock.mock_open(read_data='data to read')
                        ) as m_open, mock.patch.object(
                self.swift_store, '_run_operation') as mock_run:
            mock_run.return_value = self._build_results(['obj'])
            mock_tmpf.return_value.__enter__.return_value.name = 'fname'
            data = self.swift_store.fetch(powervm.TEST_INST1)
            self.assertEqual('data to read', data)
            # The temporary download file must always be cleaned up.
            mock_rmv.assert_called_once_with(m_open.return_value.name)

            # Bad result from the download
            mock_run.return_value[0]['success'] = False
            self.assertRaises(api.NVRAMDownloadException,
                              self.swift_store.fetch, powervm.TEST_INST1)

    def test_delete(self):
        with mock.patch.object(self.swift_store, '_run_operation') as mock_run:
            mock_run.return_value = self._build_results(['obj'])
            self.swift_store.delete(powervm.TEST_INST1)
            mock_run.assert_called_once_with(None, 'delete',
                                             container='powervm_nvram',
                                             objects=[powervm.TEST_INST1.uuid])

            # Bad result from the operation
            mock_run.return_value[0]['success'] = False
            self.assertRaises(api.NVRAMDeleteException,
                              self.swift_store.delete, powervm.TEST_INST1)

View File

@ -47,3 +47,11 @@ class TestVMTasks(test.TestCase):
mock_vm_rename.assert_called_once_with(self.apt, 'host_uuid',
self.instance, 'new_name')
self.assertEqual('new_entry', new_entry)
def test_store_nvram(self):
nvram_mgr = mock.Mock()
store_nvram = tf_vm.StoreNvram(nvram_mgr, self.instance,
immediate=True)
store_nvram.execute()
nvram_mgr.store.assert_called_once_with(self.instance,
immediate=True)

View File

@ -126,6 +126,40 @@ class TestPowerVMDriver(test.TestCase):
# The local disk driver has been mocked, so we just compare the name
self.assertIn('LocalStorage()', str(self.drv.disk_dvr))
    @mock.patch('nova_powervm.virt.powervm.nvram.manager.NvramManager')
    @mock.patch('oslo_utils.importutils.import_object')
    @mock.patch('nova.utils.spawn')
    def test_setup_nvram_store(self, mock_spawn, mock_import, mock_mgr):
        """_setup_nvram_store only builds a manager for a real store."""
        # 'none' (matched case-insensitively) means no store is configured,
        # so nothing should be imported, built or spawned.
        self.flags(nvram_store='NoNe', group='powervm')
        self.drv._setup_nvram_store()
        self.assertFalse(mock_import.called)
        self.assertFalse(mock_mgr.called)
        self.assertFalse(mock_spawn.called)

        # A real store should import the store API, build the manager and
        # spawn the host-startup pass.
        self.flags(nvram_store='swift', group='powervm')
        self.drv._setup_nvram_store()
        self.assertTrue(mock_import.called)
        self.assertTrue(mock_mgr.called)
        self.assertTrue(mock_spawn.called)
    @mock.patch.object(vm, 'get_lpars')
    @mock.patch.object(vm, 'get_instance')
    def test_nvram_host_startup(self, mock_get_inst, mock_get_lpars):
        """Startup stores NVRAM only for LPARs with a nova instance."""
        mock_lpar_wrapper = mock.Mock()
        mock_lpar_wrapper.uuid = 'uuid_value'
        mock_get_lpars.return_value = [mock_lpar_wrapper,
                                       mock_lpar_wrapper,
                                       mock_lpar_wrapper]
        # The second LPAR has no matching nova instance and is skipped.
        mock_get_inst.side_effect = [powervm.TEST_INST1,
                                     None,
                                     powervm.TEST_INST2]

        self.drv.nvram_mgr = mock.Mock()
        self.drv._nvram_host_startup()
        self.drv.nvram_mgr.store.assert_has_calls(
            [mock.call(powervm.TEST_INST1), mock.call(powervm.TEST_INST2)])
@mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
@mock.patch('nova.context.get_admin_context')
def test_driver_ops(self, mock_get_ctx, mock_getuuid):
@ -1037,6 +1071,7 @@ class TestPowerVMDriver(test.TestCase):
# Tasks expected to be added for migrate
expected = [
'pwr_off_lpar',
'store_nvram',
'extend_disk_boot',
'disconnect_vol_*',
'disconnect_vol_*',
@ -1050,7 +1085,8 @@ class TestPowerVMDriver(test.TestCase):
mock_bdms)
taskflow_fix.assert_tasks_added(self, expected)
# Check the size set in the resize task
extend_task = taskflow_fix.tasks_added[1]
extend_task = taskflow_fix.tasks_added[
expected.index('extend_disk_boot')]
self.assertEqual(extend_task.size, 12)
self.san_lpar_name.assert_called_with('migrate_' + self.inst.name)
@ -1161,7 +1197,7 @@ class TestPowerVMDriver(test.TestCase):
entry = (r'Operation: %(op)s. Virtual machine display '
'name: %(display_name)s, name: %(name)s, '
'UUID: %(uuid)s')
msg_dict = {'uuid': '49629a5c-f4c4-4721-9511-9725786ff2e5',
msg_dict = {'uuid': 'b3c04455-a435-499d-ac81-371d2a2d334f',
'display_name': u'Fake Instance',
'name': 'instance-00000001',
'op': 'fake_op'}
@ -1619,6 +1655,15 @@ class TestNovaEventHandler(test.TestCase):
'EventID': '1452692619566',
'EventDetail': 'RMCState,PartitionState,Other',
},
{
'EventType': 'MODIFY_URI',
'EventData': 'http://localhost:12080/rest/api/uom/Managed'
'System/c889bf0d-9996-33ac-84c5-d16727083a77/'
'LogicalPartition/794654F5-B6E9-4A51-BEC2-'
'A73E41EAA938',
'EventID': '1452692619566',
'EventDetail': 'NVRAM',
},
]
mock_qprops.return_value = pvm_bp.LPARState.RUNNING
@ -1626,3 +1671,4 @@ class TestNovaEventHandler(test.TestCase):
self.handler.process(event_data)
self.assertTrue(self.mock_driver.emit_event.called)
self.assertTrue(self.mock_driver.nvram_mgr.store.called)

View File

@ -27,13 +27,13 @@ from nova import utils as n_utils
from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event
import re
from oslo_log import log as logging
from oslo_utils import importutils
import re
import six
from taskflow import engines as tf_eng
from taskflow.patterns import linear_flow as tf_lf
import time
from pypowervm import adapter as pvm_apt
from pypowervm import exceptions as pvm_exc
@ -57,6 +57,7 @@ from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import image as img
from nova_powervm.virt.powervm import live_migration as lpm
from nova_powervm.virt.powervm import mgmt
from nova_powervm.virt.powervm.nvram import manager as nvram_mgr
from nova_powervm.virt.powervm.tasks import image as tf_img
from nova_powervm.virt.powervm.tasks import network as tf_net
from nova_powervm.virt.powervm.tasks import storage as tf_stg
@ -81,6 +82,11 @@ DISK_ADPT_MAPPINGS = {
'localdisk': 'localdisk.LocalStorage',
'ssp': 'ssp.SSPDiskAdapter'
}
# NVRAM store APIs for the NVRAM manager to use
NVRAM_NS = 'nova_powervm.virt.powervm.nvram.'
NVRAM_APIS = {
'swift': 'swift.SwiftNvramStore',
}
class PowerVMDriver(driver.ComputeDriver):
@ -104,6 +110,8 @@ class PowerVMDriver(driver.ComputeDriver):
# Live migrations
self.live_migrations = {}
# Set the nvram mgr to None so events are not handled until it's setup
self.nvram_mgr = None
# Get an adapter
self._get_adapter()
# First need to resolve the managed host UUID
@ -115,6 +123,8 @@ class PowerVMDriver(driver.ComputeDriver):
self._get_disk_adapter()
self.image_api = image.API()
self._setup_nvram_store()
# Init Host CPU Statistics
self.host_cpu_stats = pvm_host.HostCPUStats(self.adapter,
self.host_uuid)
@ -151,6 +161,32 @@ class PowerVMDriver(driver.ComputeDriver):
DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
conn_info)
    def _setup_nvram_store(self):
        """Setup the NVRAM store for remote restart.

        When the configured store is 'none', self.nvram_mgr is left unset
        and NVRAM events are not handled.
        """
        # The option is restricted to 'none'/'swift' (case-insensitive) by
        # its config definition.
        store = CONF.powervm.nvram_store.lower()
        if store != 'none':
            store_api = importutils.import_object(
                NVRAM_NS + NVRAM_APIS[store])
            # Events will be handled once the nvram_mgr is set.
            self.nvram_mgr = nvram_mgr.NvramManager(
                store_api, self.adapter, self.host_uuid)
            # Do host startup for NVRAM for existing VMs on the host, in
            # the background so driver init is not delayed.
            n_utils.spawn(self._nvram_host_startup)
    def _nvram_host_startup(self):
        """NVRAM Startup.

        When the compute node starts up, it's not known if any NVRAM events
        were missed when the compute process was not running.  During
        startup put each LPAR on the queue to be updated, just in case.
        """
        for lpar_w in vm.get_lpars(self.adapter):
            # Find the instance for the LPAR.
            inst = vm.get_instance(ctx.get_admin_context(), lpar_w.uuid)
            # Only queue instances this compute host owns.
            if inst is not None and inst.host == CONF.host:
                self.nvram_mgr.store(inst)
            # Yield between (potentially many) LPARs so other work can run.
            time.sleep(0)
def _get_host_uuid(self):
"""Get the System wrapper and its UUID for the (single) host."""
syswraps = pvm_ms.System.wrap(
@ -1046,6 +1082,11 @@ class PowerVMDriver(driver.ComputeDriver):
flow.add(tf_vm.PowerOff(self.adapter, self.host_uuid,
pvm_inst_uuid, instance))
if not same_host:
# If VM is moving to a new host make sure the NVRAM is at the very
# latest.
flow.add(tf_vm.StoreNvram(self.nvram_mgr, instance,
immediate=True))
if flav_obj.root_gb > instance.root_gb:
# Resize the root disk
flow.add(tf_stg.ExtendDisk(self.disk_dvr, context, instance,
@ -1727,7 +1768,7 @@ class PowerVMDriver(driver.ComputeDriver):
class NovaEventHandler(pvm_apt.RawEventHandler):
"""Used to receive and handle events from PowerVM."""
inst_actions_handled = {'PartitionState'}
inst_actions_handled = {'PartitionState', 'NVRAM'}
    def __init__(self, driver):
        """Create the event handler.

        :param driver: the compute driver that owns this handler; used to
                       emit lifecycle events and reach the NVRAM manager.
        """
        self._driver = driver
@ -1789,6 +1830,11 @@ class NovaEventHandler(pvm_apt.RawEventHandler):
'change to: %s'), pvm_state, instance=inst)
self._driver.emit_event(lce)
# If the NVRAM has changed for this instance and a store is configured.
if 'NVRAM' in details and self._driver.nvram_mgr is not None:
# Schedule the NVRAM for the instance to be stored.
self._driver.nvram_mgr.store(inst)
def process(self, events):
"""Process the event that comes back from PowerVM.

View File

@ -0,0 +1,69 @@
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from nova import exception as nex
import six
from nova_powervm.virt.powervm.i18n import _
class NVRAMUploadException(nex.NovaException):
    """Raised when the NVRAM data could not be saved to the store."""
    msg_fmt = _("The NVRAM could not be stored for instance %(instance)s. "
                "Reason: %(reason)s")
class NVRAMDownloadException(nex.NovaException):
    """Raised when the NVRAM data could not be retrieved from the store."""
    msg_fmt = _("The NVRAM could not be fetched for instance %(instance)s. "
                "Reason: %(reason)s")
class NVRAMDeleteException(nex.NovaException):
    """Raised when the NVRAM data could not be removed from the store."""
    msg_fmt = _("The NVRAM could not be deleted for instance %(instance)s. "
                "Reason: %(reason)s")
class NVRAMConfigOptionNotSet(nex.NovaException):
    """Raised when a required NVRAM store config option has no value."""
    msg_fmt = _("The configuration option '%(option)s' must be set.")
@six.add_metaclass(abc.ABCMeta)
class NvramStore(object):
    """Interface for NVRAM storage backends.

    Implementations persist, retrieve and remove the (base64 encoded)
    NVRAM data associated with an instance.
    """

    @abc.abstractmethod
    def store(self, instance, data, force=True):
        """Store the NVRAM into the storage service.

        :param instance: instance object
        :param data: the NVRAM data base64 encoded string
        :param force: boolean whether an update should always be saved,
                      otherwise, check to see if it's changed.
        """

    @abc.abstractmethod
    def fetch(self, instance):
        """Fetch the NVRAM from the storage service.

        :param instance: instance object
        :returns: the NVRAM data base64 encoded string
        """

    @abc.abstractmethod
    def delete(self, instance):
        """Delete the NVRAM from the storage service.

        :param instance: instance object
        """

View File

@ -0,0 +1,208 @@
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from nova import utils as n_utils
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
import time
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
LOCK_NVRAM_UPDT_LIST = 'nvram_update_list'
LOCK_NVRAM_STORE = 'nvram_update'
class NvramManager(object):
    """The manager of the NVRAM store and fetch process.

    This class uses two locks. One for controlling access to the list of
    instances to update the NVRAM for and another to control actually
    updating the NVRAM for the instance itself.

    An update to the instance store should always lock the update lock
    first and then get the list lock.  There should never be a case where
    the list lock is acquired before the update lock.  This can lead to
    deadlock cases.

    NVRAM events for an instance come in spurts primarily during power on
    and off, from what has been observed so far. By using a dictionary and
    the instance.uuid as the key, rapid requests to store the NVRAM can be
    collapsed down into a single request (optimal).
    """

    def __init__(self, store_api, adapter, host_uuid):
        """Create the manager.

        :param store_api: the NvramStore api to use.
        :param adapter: pypowervm Adapter
        :param host_uuid: powervm host uuid string
        """
        super(NvramManager, self).__init__()
        self._api = store_api
        self._adapter = adapter
        self._host_uuid = host_uuid

        # Pending updates keyed by instance uuid, so repeated requests for
        # the same instance collapse into a single entry.
        self._update_list = {}
        # Wake-up channel for the update thread; a None entry signals it
        # to stop (see shutdown()).
        self._queue = eventlet.queue.LightQueue()
        self._shutdown = False
        # NOTE(review): this rebinds the attribute from the bound method to
        # the spawned thread object; shutdown() relies on the thread
        # object's wait().
        self._update_thread = n_utils.spawn(self._update_thread)
        LOG.debug('NVRAM store manager started.')

    def shutdown(self):
        """Shutdown the NVRAM Manager."""
        LOG.debug('NVRAM store manager shutting down.')
        self._shutdown = True
        # Remove all pending updates
        self._clear_list()
        # Signal the thread to stop
        self._queue.put(None)
        self._update_thread.wait()

    def store(self, instance, immediate=False):
        """Store the NVRAM for an instance.

        :param instance: The instance to store the NVRAM for.
        :param immediate: Force the update to take place immediately.
                          Otherwise, the request is queued for asynchronous
                          update.
        """
        if immediate:
            self._update_instance(instance=instance)
        else:
            # Add it to the list to update
            self._add_to_list(instance)
            # Trigger the thread
            self._queue.put(instance.uuid, block=False)
            # Sleep so the thread gets a chance to run
            time.sleep(0)

    def fetch(self, instance):
        """Fetch the NVRAM for an instance.

        :param instance: The instance to fetch the NVRAM for.
        :returns: The NVRAM data for the instance or None if the data
                  could not be fetched.
        """
        try:
            return self._api.fetch(instance)
        except Exception as e:
            # Fetch exceptions should not end the operation.
            LOG.exception(_LE('Could not update NVRAM: %s'), e,
                          instance=instance)
            return None

    @lockutils.synchronized(LOCK_NVRAM_STORE)
    def remove(self, instance):
        """Remove the stored NVRAM for an instance.

        :param instance: The instance for which the NVRAM will be removed.
        """
        # Remove any pending updates
        self._pop_from_list(uuid=instance.uuid)
        # Remove it from the store
        self._api.delete(instance)

    @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
    def _add_to_list(self, instance):
        """Add an instance to the list of instances to store the NVRAM."""
        self._update_list[instance.uuid] = instance

    @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
    def _pop_from_list(self, uuid=None):
        """Pop an instance off the list of instance to update.

        :param uuid: The uuid of the instance to update or if not specified
                     pull the next instance off the list.
        :returns: A (uuid, instance) tuple when no uuid is given; the
                  instance alone when a uuid is given; (None, None) when
                  the list is empty or the uuid is not present.
        """
        try:
            if uuid is None:
                # popitem() yields a (uuid, instance) pair.
                return self._update_list.popitem()
            else:
                # NOTE(review): this branch returns only the instance, not
                # a tuple like the other paths.
                return self._update_list.pop(uuid)
        except KeyError:
            return None, None

    @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
    def _clear_list(self):
        """Clear the list of instance to store NVRAM for."""
        self._update_list.clear()

    @lockutils.synchronized(LOCK_NVRAM_STORE)
    def _update_instance(self, instance=None):
        """Perform an update of NVRAM for instance.

        :param instance: The instance to update or if not specified pull
                         the next one off the list to update.
        """
        if instance is None:
            uuid, instance = self._pop_from_list()
            if uuid is None:
                # Nothing pending.
                return
        else:
            # Remove any pending updates for this instance so the async
            # thread does not redo the work.
            self._pop_from_list(uuid=instance.uuid)

        try:
            LOG.debug('Updating NVRAM for instance: %s', instance.uuid)
            data = self._get_data(instance)
            if data is not None:
                self._api.store(instance, data)
        except Exception as e:
            # Update exceptions should not end the operation.
            LOG.exception(_LE('Could not update NVRAM: %s'), e,
                          instance=instance)

    def _get_data(self, instance):
        """Get the NVRAM data for the instance.

        :param instance: The instance to get the data for.
        :returns: The NVRAM data for the instance, or None if it could not
                  be read.
        """
        data = None
        try:
            # Get the data from the adapter.
            entry = vm.get_instance_wrapper(self._adapter, instance,
                                            self._host_uuid,
                                            xag=pvm_const.XAG.NVRAM)
            data = entry.nvram
            LOG.debug('NVRAM for instance: %s', data, instance=instance)
        except pvm_exc.HttpError as e:
            # The VM might have been deleted since the store request.
            # NOTE(review): the status is compared against the string
            # '404'; if e.response.status is an int this condition is
            # always true -- confirm against pypowervm's Response type.
            if e.response.status not in ['404']:
                LOG.exception(e)
                LOG.warning(_LW('Unable to store the NVRAM for instance: '
                                '%s'), instance.name)
        return data

    def _update_thread(self):
        """The thread that is charged with updating the NVRAM store."""
        LOG.debug('NVRAM store manager update thread started.')
        # Loop until it's time to shut down
        while not self._shutdown:
            # A None entry is the shutdown signal from shutdown().
            if self._queue.get(block=True) is None:
                LOG.debug('NVRAM store manager update thread is ending.')
                return

            self._update_instance()
            time.sleep(0)

View File

@ -0,0 +1,195 @@
# Copyright 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import hashlib
import os
import six
import tempfile
import types
from nova_powervm import conf as cfg
from nova_powervm.conf import powervm
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm.nvram import api
from oslo_concurrency import lockutils
from oslo_log import log as logging
from swiftclient import service as swft_srv
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SwiftNvramStore(api.NvramStore):
    """NvramStore implementation backed by OpenStack Swift object storage.

    Each instance's NVRAM is kept as one object, named by the instance
    uuid, inside a configurable container.
    """

    def __init__(self):
        super(SwiftNvramStore, self).__init__()
        self.container = CONF.powervm.swift_container
        # Build the swift service options
        self.options = self._init_swift()

    def _init_swift(self):
        """Initialize all the options needed to communicate with Swift.

        :raises api.NVRAMConfigOptionNotSet: if any swift option is unset.
        """
        # Every swift_* option is required; fail fast if one is missing.
        for opt in powervm.swift_opts:
            if getattr(CONF.powervm, opt.name) is None:
                raise api.NVRAMConfigOptionNotSet(option=opt.name)

        options = {
            'auth_version': CONF.powervm.swift_auth_version,
            'os_username': CONF.powervm.swift_username,
            'os_user_domain_name': CONF.powervm.swift_user_domain_name,
            'os_password': CONF.powervm.swift_password,
            'os_project_name': CONF.powervm.swift_project_name,
            'os_project_domain_name': CONF.powervm.swift_project_domain_name,
            'os_auth_url': CONF.powervm.swift_auth_url,
        }
        return options

    def _run_operation(self, service_options, f, *args, **kwargs):
        """Convenience method to call the Swift client service.

        :param service_options: options for the SwiftService; the store's
                                own options are used when None.
        :param f: name of the SwiftService method to invoke
        :param args: positional arguments for the method
        :param kwargs: keyword arguments for the method
        :returns: the method's result; a generator result is copied into
                  a list because the service is closed on return.
        :raises swft_srv.SwiftError: re-raised after logging.
        """
        service_options = (self.options if service_options is None
                           else service_options)
        with swft_srv.SwiftService(options=service_options) as swift:
            # Get the function to call
            func = getattr(swift, f)
            try:
                result = func(*args, **kwargs)
                # For generators we have to copy the results because the
                # service is going out of scope.
                if isinstance(result, types.GeneratorType):
                    results = []
                    LOG.debug('SwiftOperation results:')
                    for r in result:
                        results.append(copy.deepcopy(r))
                        LOG.debug(str(r))
                    result = results
                else:
                    LOG.debug('SwiftOperation result: %s' % str(result))
                return result
            except swft_srv.SwiftError as e:
                LOG.exception(e)
                raise

    @classmethod
    def _get_name_from_listing(cls, results):
        """Extract object/container names from a Swift listing result."""
        names = []
        for result in results:
            if result['success']:
                for obj in result['listing']:
                    names.append(obj['name'])
        return names

    def _get_container_names(self):
        """List the container names visible to the configured user."""
        results = self._run_operation(None, 'list', options={'long': True})
        return self._get_name_from_listing(results)

    def _get_object_names(self, container):
        """List the object names within a container.

        :param container: the name of the Swift container to list
        """
        results = self._run_operation(None, 'list', options={'long': True},
                                      container=container)
        return self._get_name_from_listing(results)

    def _store(self, instance, data):
        """Store the NVRAM into the storage service.

        :param instance: instance object
        :param data: the NVRAM data base64 encoded string
        :raises api.NVRAMUploadException: if any upload result failed.
        """
        source = six.StringIO(data)
        obj = swft_srv.SwiftUploadObject(source, object_name=instance.uuid)
        for result in self._run_operation(None, 'upload', self.container,
                                          [obj]):
            if not result['success']:
                # The upload failed.
                raise api.NVRAMUploadException(instance=instance.name,
                                               reason=result)

    @lockutils.synchronized('nvram')
    def store(self, instance, data, force=True):
        """Store the NVRAM into the storage service.

        :param instance: instance object
        :param data: the NVRAM data base64 encoded string
        :param force: boolean whether an update should always be saved,
                      otherwise, check to see if it's changed.
        """
        if not force:
            # See if the entry exists and has not changed.
            results = self._run_operation(None, 'stat', options={'long': True},
                                          container=self.container,
                                          objects=[instance.uuid])
            result = results[0]
            if result['success']:
                # Swift's etag header holds the md5 of the stored object;
                # compare it against the md5 of the new data to avoid a
                # needless upload.
                existing_hash = result['headers']['etag']
                if six.PY3:
                    data = data.encode('ascii')
                md5 = hashlib.md5(data).hexdigest()
                if existing_hash == md5:
                    LOG.info(_LI('NVRAM has not changed for instance: %s'),
                             instance.name, instance=instance)
                    return

        self._store(instance, data)
        LOG.debug('NVRAM updated for instance: %s' % instance.name)

    def fetch(self, instance):
        """Fetch the NVRAM from the storage service.

        :param instance: instance object
        :returns: the NVRAM data base64 encoded string
        :raises api.NVRAMDownloadException: if the download failed.
        """
        try:
            # Create a temp file for download into
            with tempfile.NamedTemporaryFile(delete=False) as f:
                options = {
                    'out_file': f.name
                }
            # The file is now created and closed for the swift client to use.
            for result in self._run_operation(
                None, 'download', container=self.container,
                    objects=[instance.uuid], options=options):
                if result['success']:
                    # NOTE(review): 'f' is rebound here to the inner file
                    # object; f.name in the finally still resolves to the
                    # same temp-file path.
                    with open(f.name, 'r') as f:
                        return f.read()
                else:
                    raise api.NVRAMDownloadException(instance=instance.name,
                                                     reason=result)
        finally:
            # Best effort cleanup of the temporary download file.
            try:
                os.remove(f.name)
            except Exception:
                LOG.warning(_LW('Could not remove temporary file: %s'),
                            f.name)

    def delete(self, instance):
        """Delete the NVRAM from the storage service.

        :param instance: instance object
        :raises api.NVRAMDeleteException: if the delete failed.
        """
        for result in self._run_operation(
            None, 'delete', container=self.container,
                objects=[instance.uuid]):
            # TODO(KYLEH): Not sure what to log here yet.
            LOG.debug('Delete result: %s' % str(result), instance=instance)
            if not result['success']:
                raise api.NVRAMDeleteException(instance=instance.name,
                                               reason=result)

View File

@ -224,6 +224,27 @@ class PowerOff(task.Task):
force_immediate=self.force_immediate)
class StoreNvram(task.Task):
    """Task that persists an instance's NVRAM via the NVRAM manager."""

    def __init__(self, nvram_mgr, instance, immediate=False):
        """Creates a task to store the NVRAM of an instance.

        :param nvram_mgr: The NVRAM manager.
        :param instance: The nova instance.
        :param immediate: boolean whether to update the NVRAM immediately
        """
        super(StoreNvram, self).__init__(name='store_nvram')
        self.nvram_mgr = nvram_mgr
        self.instance = instance
        self.immediate = immediate

    def execute(self):
        # Nothing to do when no NVRAM store is configured.
        if self.nvram_mgr is None:
            return
        self.nvram_mgr.store(self.instance, immediate=self.immediate)
class Delete(task.Task):
"""The task to delete the instance from the system."""

View File

@ -440,18 +440,19 @@ def get_lpar_names(adapter):
return [x.name for x in get_lpars(adapter)]
def get_instance_wrapper(adapter, instance, host_uuid, xag=None):
    """Get the LPAR wrapper for a given Nova instance.

    The diff rendering left the old and new signature, docstring and call
    lines interleaved; this is the coherent post-image, which adds the
    optional 'xag' parameter (default None keeps callers unchanged).

    :param adapter: The adapter for the pypowervm API
    :param instance: The nova instance.
    :param host_uuid: The host UUID
    :param xag: The pypowervm XAG to be used on the read request
    :return: The pypowervm logical_partition wrapper.
    """
    pvm_inst_uuid = get_pvm_uuid(instance)
    resp = adapter.read(pvm_ms.System.schema_type, root_id=host_uuid,
                        child_type=pvm_lpar.LPAR.schema_type,
                        child_id=pvm_inst_uuid, xag=xag)
    return pvm_lpar.LPAR.wrap(resp)

View File

@ -5,6 +5,7 @@ coverage>=3.6
discover
fixtures>=1.3.1
python-subunit>=0.0.18 # Apache-2.0/BSD
python-swiftclient>=2.7.0 # Apache-2.0
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3 # BSD
oslosphinx>=2.5.0,!=3.4.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0