Driver cleanup work

This change set does some driver cleanup. It does the following:
 - Moves the mgmt_uuid lookup into the mgmt.py file. The UUID is stored
   once, on first call, since it will not change over the life of the
   driver process (see the sketch after this list).
 - Changes the callers to use this new attribute.
 - Updates the owner of the driver.
 - Removes the host_uuid from get_instance_wrapper (which wasn't used,
   but had a lot of code contorting to pass the host_uuid around).
 - Changes the disk drivers to take explicit arguments instead of a
   connection dictionary.
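
A minimal sketch of the new memoization pattern, lifted from the mgmt.py
hunk later in this diff (the lock name and pypowervm call are the ones
this change introduces):

    from oslo_concurrency import lockutils
    from pypowervm.tasks import partition as pvm_par

    _MP_UUID = None

    @lockutils.synchronized("mgmt_lpar_uuid")
    def mgmt_uuid(adapter):
        """Return the management partition's UUID, cached on first use."""
        global _MP_UUID
        if not _MP_UUID:
            # Only the first call hits the REST API; the management
            # partition's UUID cannot change while the process runs.
            _MP_UUID = pvm_par.get_this_partition(adapter).uuid
        return _MP_UUID

Callers such as DiskAdapter.__init__ then resolve the UUID themselves
(self.mp_uuid = mgmt.mgmt_uuid(self.adapter)) rather than receiving it
through a connection dictionary.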

Change-Id: I5efd642e84fc48578ac721ca55914260a579666b
Drew Thorstensen 2016-07-06 17:56:34 -04:00
parent 66a9c171fc
commit e493bbe1e3
20 changed files with 129 additions and 122 deletions

@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from nova import test
from pypowervm import const as pvm_const
@@ -30,9 +31,14 @@ class TestDiskAdapter(test.TestCase):
super(TestDiskAdapter, self).setUp()
self.useFixture(fx.ImageAPI())
# These are not used currently.
conn = {'adapter': None, 'host_uuid': None, 'mp_uuid': None}
self.st_adpt = disk_dvr.DiskAdapter(conn)
# Return the mgmt uuid
self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock
self.mgmt_uuid.return_value = 'mp_uuid'
# The values (adapter and host uuid) are not used in the base.
# Default them to None.
self.st_adpt = disk_dvr.DiskAdapter(None, None)
def test_capacity(self):
"""These are arbitrary capacity numbers."""

@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
import copy
@@ -60,6 +61,11 @@ class TestLocalDisk(test.TestCase):
vg_uuid = 'd5065c2c-ac43-3fa6-af32-ea84a3960291'
self.mock_vg_uuid.return_value = ('vios_uuid', vg_uuid)
# Return the mgmt uuid
self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock
self.mgmt_uuid.return_value = 'mp_uuid'
def tearDown(self):
test.TestCase.tearDown(self)
@@ -68,8 +74,7 @@
@staticmethod
def get_ls(adpt):
return ld.LocalStorage({'adapter': adpt, 'host_uuid': 'host_uuid',
'mp_uuid': 'mp_uuid'})
return ld.LocalStorage(adpt, 'host_uuid')
@mock.patch('pypowervm.tasks.storage.upload_new_vdisk')
@mock.patch('nova_powervm.virt.powervm.disk.driver.'
@@ -400,6 +405,11 @@ class TestLocalDiskFindVG(test.TestCase):
self.mock_vios_feed = [pvm_vios.VIOS.wrap(self.vio_to_vg)]
self.mock_vg_feed = [pvm_stor.VG.wrap(self.vg_to_vio)]
# Return the mgmt uuid
self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock
self.mgmt_uuid.return_value = 'mp_uuid'
@mock.patch('pypowervm.wrappers.storage.VG.wrap')
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap')
def test_get_vg_uuid(self, mock_vio_wrap, mock_vg_wrap):
@@ -411,9 +421,7 @@
mock_vg_wrap.return_value = self.mock_vg_feed
self.flags(volume_group_name='rootvg', group='powervm')
storage = ld.LocalStorage({'adapter': self.apt,
'host_uuid': 'host_uuid',
'mp_uuid': 'mp_uuid'})
storage = ld.LocalStorage(self.apt, 'host_uuid')
# Make sure the uuids match
self.assertEqual('d5065c2c-ac43-3fa6-af32-ea84a3960291',
@@ -435,5 +443,4 @@ class TestLocalDiskFindVG(test.TestCase):
volume_group_vios_name='invalid_vios', group='powervm')
self.assertRaises(npvmex.VGNotFound, ld.LocalStorage,
{'adapter': self.apt, 'host_uuid': 'host_uuid',
'mp_uuid': 'mp_uuid'})
self.apt, 'host_uuid')

@@ -112,11 +112,14 @@ class TestSSPDiskAdapter(test.TestCase):
# By default, assume the config supplied a Cluster name
self.flags(cluster_name='clust1', group='powervm')
# Return the mgmt uuid
self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
'nova_powervm.virt.powervm.mgmt.mgmt_uuid')).mock
self.mgmt_uuid.return_value = 'mp_uuid'
def _get_ssp_stor(self):
ssp_stor = ssp_dvr.SSPDiskAdapter(
{'adapter': self.apt,
'host_uuid': '67dca605-3923-34da-bd8f-26a378fc817f',
'mp_uuid': 'mp_uuid'})
self.apt, '67dca605-3923-34da-bd8f-26a378fc817f')
return ssp_stor
def _bld_resp(self, status=200, entry_or_list=None):

@@ -60,10 +60,10 @@ class TestVMTasks(test.TestCase):
@mock.patch('nova_powervm.virt.powervm.vm.rename')
def test_rename(self, mock_vm_rename):
mock_vm_rename.return_value = 'new_entry'
rename = tf_vm.Rename(self.apt, 'host_uuid', self.instance, 'new_name')
rename = tf_vm.Rename(self.apt, self.instance, 'new_name')
new_entry = rename.execute()
mock_vm_rename.assert_called_once_with(self.apt, 'host_uuid',
self.instance, 'new_name')
mock_vm_rename.assert_called_once_with(
self.apt, self.instance, 'new_name')
self.assertEqual('new_entry', new_entry)
def test_store_nvram(self):

@@ -159,11 +159,11 @@ class TestPowerVMDriver(test.TestCase):
self.assertIsNotNone(vol_connector['wwpns'])
self.assertIsNotNone(vol_connector['host'])
def test_get_disk_adapter(self):
def test_setup_disk_adapter(self):
# Ensure we can handle upper case option and we instantiate the class
self.flags(disk_driver='LoCaLDisK', group='powervm')
self.drv.disk_dvr = None
self.drv._get_disk_adapter()
self.drv._setup_disk_adapter()
# The local disk driver has been mocked, so we just compare the name
self.assertIn('LocalStorage()', str(self.drv.disk_dvr))
@@ -622,16 +622,16 @@ class TestPowerVMDriver(test.TestCase):
self.assertEqual(2, self.vol_drv.disconnect_volume.call_count)
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg'
'.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.storage.CreateDiskForImg.'
'execute')
@mock.patch('nova.virt.powervm.driver.PowerVMDriver.'
'_is_booted_from_volume')
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
@mock.patch('nova_powervm.virt.powervm.tasks.vm.UpdateIBMiSettings'
'.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.vm.UpdateIBMiSettings.'
'execute')
@mock.patch('nova.virt.powervm.driver.PowerVMDriver.'
'_get_boot_connectivity_type')
@mock.patch('pypowervm.tasks.power.power_on')

@@ -39,6 +39,20 @@ class TestMgmt(test.TestCase):
self.resp = lpar_http.response
@mock.patch('pypowervm.tasks.partition.get_this_partition')
def test_mgmt_uuid(self, mock_get_partition):
mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
adpt = mock.Mock()
# First run should call the partition only once
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
mock_get_partition.assert_called_once_with(adpt)
# But a subsequent call should effectively no-op
mock_get_partition.reset_mock()
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
mock_get_partition.assert_not_called()
@mock.patch('glob.glob')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.realpath')

@@ -453,8 +453,7 @@ class TestVM(test.TestCase):
entry = mock.Mock()
entry.update.return_value = 'NewEntry'
new_entry = vm.rename(self.apt, 'mock_host_uuid', instance, 'new_name',
entry=entry)
new_entry = vm.rename(self.apt, instance, 'new_name', entry=entry)
self.assertEqual('new_name', entry.name)
entry.update.assert_called_once_with()
mock_entry_transaction.assert_called_once_with(mock.ANY)
@@ -465,9 +464,8 @@
# Test optional entry parameter
entry.reset_mock()
mock_get_inst.return_value = entry
new_entry = vm.rename(self.apt, 'mock_host_uuid', instance, 'new_name')
mock_get_inst.assert_called_once_with(self.apt, instance,
'mock_host_uuid')
new_entry = vm.rename(self.apt, instance, 'new_name')
mock_get_inst.assert_called_once_with(self.apt, instance)
self.assertEqual('new_name', entry.name)
entry.update.assert_called_once_with()
self.assertEqual('NewEntry', new_entry)
@@ -537,7 +535,7 @@ class TestVM(test.TestCase):
def test_instance_exists(self, mock_getvmqp, mock_getuuid):
# Try the good case where it exists
mock_getvmqp.side_effect = 'fake_state'
mock_parms = (mock.Mock(), mock.Mock(), mock.Mock())
mock_parms = (mock.Mock(), mock.Mock())
self.assertTrue(vm.instance_exists(*mock_parms))
# Test the scenario where it does not exist.
@@ -626,16 +624,14 @@ class TestVM(test.TestCase):
@mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
def test_update_ibmi_settings(self, mock_lparw, mock_ibmi):
instance = mock.MagicMock()
# Test update load source with vscsi boot
boot_type = 'vscsi'
vm.update_ibmi_settings(
self.apt, instance, 'host-uuid', boot_type)
mock_ibmi.assert_called_once_with(
self.apt, mock.ANY, 'vscsi')
vm.update_ibmi_settings(self.apt, instance, boot_type)
mock_ibmi.assert_called_once_with(self.apt, mock.ANY, 'vscsi')
mock_ibmi.reset_mock()
# Test update load source with npiv boot
boot_type = 'npiv'
vm.update_ibmi_settings(
self.apt, instance, 'host-uuid', boot_type)
mock_ibmi.assert_called_once_with(
self.apt, mock.ANY, 'npiv')
vm.update_ibmi_settings(self.apt, instance, boot_type)
mock_ibmi.assert_called_once_with(self.apt, mock.ANY, 'npiv')

@@ -30,6 +30,7 @@ import pypowervm.wrappers.virtual_io_server as pvm_vios
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import mgmt
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
@@ -73,15 +74,15 @@ class DiskAdapter(object):
'shared_storage': False,
}
def __init__(self, connection):
def __init__(self, adapter, host_uuid):
"""Initialize the DiskAdapter
:param connection: connection information for the underlying driver
:param adapter: The pypowervm adapter
:param host_uuid: The UUID of the PowerVM host.
"""
self._connection = connection
self.adapter = connection['adapter']
self.host_uuid = connection['host_uuid']
self.mp_uuid = connection['mp_uuid']
self.adapter = adapter
self.host_uuid = host_uuid
self.mp_uuid = mgmt.mgmt_uuid(self.adapter)
self.image_api = image.API()
@property
@@ -151,8 +152,7 @@
element.
"""
if lpar_wrap is None:
lpar_wrap = vm.get_instance_wrapper(self.adapter, instance,
self.host_uuid)
lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
match_func = self.disk_match_func(disk_type, instance)
for vios_uuid in self.vios_uuids:
vios_wrap = pvm_vios.VIOS.wrap(self.adapter.read(
@@ -173,8 +173,7 @@
:raise InstanceDiskMappingFailed: If the mapping could not be done.
"""
msg_args = {'instance_name': instance.name}
lpar_wrap = vm.get_instance_wrapper(self.adapter, instance,
self.host_uuid)
lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
for stg_elem, vios in self.instance_disk_iter(instance,
lpar_wrap=lpar_wrap):
msg_args['disk_name'] = stg_elem.name

@@ -40,8 +40,8 @@ CONF = cfg.CONF
class LocalStorage(disk_dvr.DiskAdapter):
def __init__(self, connection):
super(LocalStorage, self).__init__(connection)
def __init__(self, adapter, host_uuid):
super(LocalStorage, self).__init__(adapter, host_uuid)
# Query to get the Volume Group UUID
self.vg_name = CONF.powervm.volume_group_name
@@ -195,11 +195,9 @@ class LocalStorage(disk_dvr.DiskAdapter):
# resize the disk, create a new partition, etc...
# If the image is bigger than disk, API should make the disk big
# enough to support the image (up to 1 Gb boundary).
vdisk, f_wrap = tsk_stg.upload_new_vdisk(
return tsk_stg.upload_new_vdisk(
self.adapter, self._vios_uuid, self.vg_uuid, stream, vol_name,
image_meta.size, d_size=disk_bytes)
return vdisk
image_meta.size, d_size=disk_bytes)[0]
def connect_disk(self, context, instance, disk_info, stg_ftsk=None):
"""Connects the disk image to the Virtual Machine.

@@ -54,12 +54,12 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
'shared_storage': True,
}
def __init__(self, connection):
def __init__(self, adapter, host_uuid):
"""Initialize the SSPDiskAdapter.
:param connection: connection information for the underlying driver
"""
super(SSPDiskAdapter, self).__init__(connection)
super(SSPDiskAdapter, self).__init__(adapter, host_uuid)
self._cluster = self._fetch_cluster(CONF.powervm.cluster_name)
self.clust_name = self._cluster.name
@@ -236,10 +236,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
boot_lu_name = self._get_disk_name(image_type, instance)
LOG.info(_LI('SSP: Disk name is %s'), boot_lu_name)
tier, boot_lu = tsk_stg.crt_lu(self._tier, boot_lu_name, disk_size_gb,
typ=pvm_stg.LUType.DISK, clone=image_lu)
return boot_lu
return tsk_stg.crt_lu(self._tier, boot_lu_name, disk_size_gb,
typ=pvm_stg.LUType.DISK, clone=image_lu)[1]
def get_disk_ref(self, instance, disk_type):
"""Returns a reference to the disk for the instance."""

@@ -122,15 +122,12 @@ class PowerVMDriver(driver.ComputeDriver):
self._get_adapter()
# First need to resolve the managed host UUID
self._get_host_uuid()
# Get the management partition's UUID
self.mp_uuid = pvm_par.get_this_partition(self.adapter).uuid
LOG.debug("Driver found compute partition UUID of: %s" % self.mp_uuid)
# Make sure the Virtual I/O Server(s) are available.
pvm_par.validate_vios_ready(self.adapter)
# Initialize the disk adapter. Sets self.disk_dvr
self._get_disk_adapter()
self._setup_disk_adapter()
self.image_api = image.API()
self._setup_rebuild_store()
@@ -170,16 +167,14 @@
eh = NovaEventHandler(self)
self.session.get_event_listener().subscribe(eh)
def _get_disk_adapter(self):
conn_info = {'adapter': self.adapter, 'host_uuid': self.host_uuid,
'mp_uuid': self.mp_uuid}
def _setup_disk_adapter(self):
"""Set up the nova ephemeral disk adapter."""
self.disk_dvr = importutils.import_object_ns(
DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
conn_info)
self.adapter, self.host_uuid)
def _setup_rebuild_store(self):
"""Setup the store for remote restart objects."""
"""Set up the store for remote restart objects."""
store = CONF.powervm.nvram_store.lower()
if store != 'none':
self.store_api = importutils.import_object(
@@ -246,7 +241,7 @@ class PowerVMDriver(driver.ComputeDriver):
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
"""
return vm.instance_exists(self.adapter, instance, self.host_uuid)
return vm.instance_exists(self.adapter, instance)
def estimate_instance_overhead(self, instance_info):
"""Estimate the virtualization overhead required to build an instance.
@@ -454,7 +449,7 @@ class PowerVMDriver(driver.ComputeDriver):
boot_type = self._get_boot_connectivity_type(
context, bdms, block_device_info)
flow_spawn.add(tf_vm.UpdateIBMiSettings(
self.adapter, instance, self.host_uuid, boot_type))
self.adapter, instance, boot_type))
# Save the slot map information
flow_spawn.add(tf_slot.SaveSlotStore(instance, slot_mgr))
@@ -752,7 +747,7 @@ class PowerVMDriver(driver.ComputeDriver):
# for each volume attached to the instance, against the destination
# host. If the migration failed, then the VM is probably not on
# the destination host.
if not vm.instance_exists(self.adapter, instance, self.host_uuid):
if not vm.instance_exists(self.adapter, instance):
LOG.info(_LI('During volume detach, the instance was not found'
' on this host.'), instance=instance)
@@ -942,7 +937,7 @@ class PowerVMDriver(driver.ComputeDriver):
"""
self._log_operation(reboot_type + ' reboot', instance)
force_immediate = reboot_type == 'HARD'
entry = vm.get_instance_wrapper(self.adapter, instance, self.host_uuid)
entry = vm.get_instance_wrapper(self.adapter, instance)
if entry.state != pvm_bp.LPARState.NOT_ACTIVATED:
pvm_pwr.power_off(entry, self.host_uuid, restart=True,
force_immediate=force_immediate)
@@ -1195,8 +1190,7 @@ class PowerVMDriver(driver.ComputeDriver):
# easy to see the VM is being migrated from pvmctl. We use the resize
# name so we don't destroy it on a revert when it's on the same host.
new_name = self._gen_resize_name(instance, same_host=same_host)
flow.add(tf_vm.Rename(self.adapter, self.host_uuid, instance,
new_name))
flow.add(tf_vm.Rename(self.adapter, instance, new_name))
try:
tf_eng.run(flow)
except Exception as e:
@@ -1343,7 +1337,7 @@ class PowerVMDriver(driver.ComputeDriver):
if same_host:
# This was a local resize, don't delete our only VM!
self._log_operation('confirm resize', instance)
vm.rename(self.adapter, self.host_uuid, instance, instance.name)
vm.rename(self.adapter, instance, instance.name)
return
# Confirming the migrate means we need to delete source VM.

@@ -238,8 +238,7 @@ class LiveMigrationSrc(LiveMigration):
:returns: a PowerVMLiveMigrateData object
"""
lpar_w = vm.get_instance_wrapper(
self.drvr.adapter, self.instance, self.drvr.host_uuid)
lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
self.lpar_w = lpar_w
LOG.debug('Dest Migration data: %s' % self.mig_data)

@@ -27,13 +27,26 @@ from nova import exception
from nova import utils
import os
from os import path
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm.tasks import partition as pvm_par
import retrying
from nova_powervm.virt.powervm import exception as npvmex
LOG = logging.getLogger(__name__)
_MP_UUID = None
@lockutils.synchronized("mgmt_lpar_uuid")
def mgmt_uuid(adapter):
"""Returns the management partitions UUID."""
global _MP_UUID
if not _MP_UUID:
_MP_UUID = pvm_par.get_this_partition(adapter).uuid
return _MP_UUID
def _tee_as_root(fpath, payload):
"""Executes 'echo $payload | sudo tee -a $fpath'.

@@ -188,7 +188,6 @@ class NvramManager(object):
try:
# Get the data from the adapter.
entry = vm.get_instance_wrapper(self._adapter, instance,
self._host_uuid,
xag=[pvm_const.XAG.NVRAM])
data = entry.nvram
LOG.debug('NVRAM for instance: %s', data, instance=instance)

@@ -83,7 +83,7 @@ class UnplugVifs(task.Task):
raise VirtualInterfaceUnplugException()
# Get all the current Client Network Adapters (CNA) on the VM itself.
cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)
cna_w_list = vm.get_cnas(self.adapter, self.instance)
# Walk through the VIFs and delete the corresponding CNA on the VM.
for network_info in self.network_infos:
@@ -127,7 +127,7 @@ class PlugVifs(task.Task):
self.instance.name)
# Get the current adapters on the system
cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)
cna_w_list = vm.get_cnas(self.adapter, self.instance)
# Trim the VIFs down to the ones that haven't yet been created.
crt_network_infos = []
@@ -235,7 +235,7 @@ class PlugVifs(task.Task):
instance=self.instance)
# Get the current adapters on the system
cna_w_list = vm.get_cnas(self.adapter, self.instance, self.host_uuid)
cna_w_list = vm.get_cnas(self.adapter, self.instance)
for network_info in self.network_infos:
try:
vif.unplug(self.adapter, self.host_uuid, self.instance,

@@ -48,8 +48,7 @@ class Get(task.Task):
self.instance = instance
def execute(self):
return vm.get_instance_wrapper(self.adapter, self.instance,
self.host_uuid)
return vm.get_instance_wrapper(self.adapter, self.instance)
class Create(task.Task):
@@ -141,29 +140,25 @@ class Rename(task.Task):
"""The task for renaming an existing VM."""
def __init__(self, adapter, host_uuid, instance, name):
def __init__(self, adapter, instance, name):
"""Creates the Task to rename a VM.
Provides the 'lpar_wrap' for other tasks.
:param adapter: The adapter for the pypowervm API
:param host_uuid: The managed system uuid
:param instance: The nova instance.
:param name: The new VM name.
"""
super(Rename, self).__init__(name='rename_lpar_%s' % name,
provides='lpar_wrap')
self.adapter = adapter
self.host_uuid = host_uuid
self.instance = instance
self.vm_name = name
def execute(self):
LOG.info(_LI('Renaming instance to name: %s'), self.name,
instance=self.instance)
wrap = vm.rename(self.adapter, self.host_uuid, self.instance,
self.vm_name)
return wrap
return vm.rename(self.adapter, self.instance, self.vm_name)
class PowerOn(task.Task):
@@ -323,23 +318,19 @@ class UpdateIBMiSettings(task.Task):
"""The task to update settings of an ibmi instance."""
def __init__(self, adapter, instance, host_uuid, boot_type):
def __init__(self, adapter, instance, boot_type):
"""Create the Task to update settings of the IBMi VM.
:param adapter: The adapter for the pypowervm API.
:param instance: The nova instance.
:param host_uuid: The host's PowerVM UUID.
:param boot_type: The boot type of the instance.
"""
super(UpdateIBMiSettings, self).__init__(
name='update_ibmi_settings')
self.adapter = adapter
self.instance = instance
self.host_uuid = host_uuid
self.boot_type = boot_type
def execute(self):
LOG.info(_LI('Update settings of instance %s.'),
self.instance.name)
vm.update_ibmi_settings(
self.adapter, self.instance, self.host_uuid, self.boot_type)
LOG.info(_LI('Update settings of instance %s.'), self.instance.name)
vm.update_ibmi_settings(self.adapter, self.instance, self.boot_type)

@@ -238,8 +238,7 @@ class PvmVifDriver(object):
# Need to find the adapters if they were not provided
if not cna_w_list:
cna_w_list = vm.get_cnas(self.adapter, self.instance,
self.host_uuid)
cna_w_list = vm.get_cnas(self.adapter, self.instance)
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
@@ -396,8 +395,7 @@ class PvmLBVifDriver(PvmLioVifDriver):
"""
# Need to find the adapters if they were not provided
if not cna_w_list:
cna_w_list = vm.get_cnas(self.adapter, self.instance,
self.host_uuid)
cna_w_list = vm.get_cnas(self.adapter, self.instance)
# Find the CNA for this vif.
cna_w = self._find_cna_for_vif(cna_w_list, vif)
@@ -472,8 +470,7 @@ class PvmOvsVifDriver(PvmLioVifDriver):
"""
# Need to find the adapters if they were not provided
if not cna_w_list:
cna_w_list = vm.get_cnas(self.adapter, self.instance,
self.host_uuid)
cna_w_list = vm.get_cnas(self.adapter, self.instance)
# Find the CNA for this vif.
cna_w = self._find_cna_for_vif(cna_w_list, vif)

@@ -449,12 +449,11 @@ def get_lpar_names(adapter):
return [x.name for x in get_lpars(adapter)]
def get_instance_wrapper(adapter, instance, host_uuid, xag=None):
def get_instance_wrapper(adapter, instance, xag=None):
"""Get the LPAR wrapper for a given Nova instance.
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance.
:param host_uuid: The host UUID
:param xag: The pypowervm XAG to be used on the read request
:return: The pypowervm logical_partition wrapper.
"""
@@ -471,12 +470,11 @@ def get_instance_wrapper(adapter, instance, host_uuid, xag=None):
raise
def instance_exists(adapter, instance, host_uuid, log_errors=False):
def instance_exists(adapter, instance, log_errors=False):
"""Determine if an instance exists on the host.
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance.
:param host_uuid: The host UUID
:param log_errors: Indicator whether to log REST data after an exception
:return: boolean, whether the instance exists.
"""
@@ -578,7 +576,7 @@ def update(adapter, host_wrapper, instance, flavor, entry=None, name=None):
"""
if not entry:
entry = get_instance_wrapper(adapter, instance, host_wrapper.uuid)
entry = get_instance_wrapper(adapter, instance)
lpar_b = VMBuilder(host_wrapper, adapter).lpar_builder(instance, flavor)
lpar_b.rebuild(entry)
@@ -590,11 +588,10 @@ def rename(adapter, host_uuid, instance, name, entry=None):
return entry.update()
def rename(adapter, host_uuid, instance, name, entry=None):
def rename(adapter, instance, name, entry=None):
"""Rename a VM.
:param adapter: The adapter for the pypowervm API
:param host_uuid: The host UUID.
:param instance: The nova instance.
:param name: The new name.
:param entry: The instance pvm entry, if available, otherwise it will
@@ -602,7 +599,7 @@ def rename(adapter, host_uuid, instance, name, entry=None):
:returns: The updated LPAR wrapper.
"""
if not entry:
entry = get_instance_wrapper(adapter, instance, host_uuid)
entry = get_instance_wrapper(adapter, instance)
hyp_name = pvm_util.sanitize_partition_name_for_api(name)
@@ -640,7 +637,7 @@ def dlt_lpar(adapter, lpar_uuid):
def power_on(adapter, instance, host_uuid, entry=None):
if entry is None:
entry = get_instance_wrapper(adapter, instance, host_uuid)
entry = get_instance_wrapper(adapter, instance)
# Get the current state and see if we can start the VM
if entry.state in POWERVM_STARTABLE_STATE:
@@ -654,7 +651,7 @@ def power_off(adapter, instance, host_uuid, entry=None, add_parms=None,
def power_off(adapter, instance, host_uuid, entry=None, add_parms=None,
force_immediate=False):
if entry is None:
entry = get_instance_wrapper(adapter, instance, host_uuid)
entry = get_instance_wrapper(adapter, instance)
# Get the current state and see if we can stop the VM
LOG.debug("Powering off request for instance %(inst)s which is in "
@@ -732,13 +729,12 @@ def get_instance(context, pvm_uuid):
return None
def get_cnas(adapter, instance, host_uuid):
def get_cnas(adapter, instance):
"""Returns the current CNAs on the instance.
The Client Network Adapters are the Ethernet adapters for a VM.
:param adapter: The pypowervm adapter.
:param instance: The nova instance.
:param host_uuid: The host system UUID.
:return The CNA wrappers that represent the ClientNetworkAdapters on the VM
"""
cna_resp = adapter.read(pvm_lpar.LPAR.schema_type,
@@ -761,14 +757,13 @@ def norm_mac(mac):
return ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
def update_ibmi_settings(adapter, instance, host_uuid, boot_type):
def update_ibmi_settings(adapter, instance, boot_type):
"""Update settings of IBMi VMs on the instance.
:param adapter: The pypowervm adapter.
:param instance: The nova instance.
:param host_uuid: The host system UUID.
:param boot_type: The boot connectivity type of the instance.
"""
lpar_wrap = get_instance_wrapper(adapter, instance, host_uuid)
lpar_wrap = get_instance_wrapper(adapter, instance)
entry = ibmi.update_ibmi_settings(adapter, lpar_wrap, boot_type)
entry.update()

@@ -194,8 +194,7 @@ class PowerVMVolumeAdapter(object):
slots used when a volume is attached to the VM
"""
# Check if the VM is in a state where the attach is acceptable.
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance,
self.host_uuid)
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
capable, reason = lpar_w.can_modify_io()
if not capable:
raise exc.VolumeAttachFailed(
@@ -215,8 +214,7 @@
slots used when a volume is detached from the VM.
"""
# Check if the VM is in a state where the detach is acceptable.
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance,
self.host_uuid)
lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
capable, reason = lpar_w.can_modify_io()
if not capable:
raise exc.VolumeDetachFailed(

@@ -3,7 +3,7 @@ name = nova_powervm
summary = PowerVM driver for OpenStack Nova.
description-file = README.rst
author = IBM
author-email = kyleh@us.ibm.com
author-email = thorst@us.ibm.com
home-page = https://launchpad.net/nova-powervm
classifier =
Environment :: OpenStack