Add vOpt media to the FeedTask

The transaction manager (FeedTask) allows multiple operations to be batched
together. It is currently used in nova-powervm to batch the storage mapping
operations.

This change set builds on the previous ones.  It adds the vOpt media to the
FeedTask (the tx_mgr), which removes one more independent mapping operation
from the spawn action.

Note that this undoes an earlier change that ran the network and storage
flows in parallel.  The config drive requires input from the network, so the
two flows can no longer be unordered.  The overall benefit of putting the
config drive in the tx_mgr far outweighs the loss of that parallelism.

A future change set will evaluate running all the storage and network
'creates' in parallel.
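
For illustration, a minimal sketch of the batching pattern (not the actual
nova-powervm code; it assumes the pypowervm FeedTask/WrapperTask APIs exactly
as they are used in the diff below, and the function name and arguments are
illustrative):

    from pypowervm.tasks import scsi_mapper as tsk_map
    from pypowervm.utils import transaction as pvm_tx
    from pypowervm.wrappers import virtual_io_server as pvm_vios


    def defer_mappings(adapter, host_uuid, vios_uuid, lpar_uuid, stg_elems):
        """Illustrative only: batch several vSCSI mappings into one FeedTask."""
        # One FeedTask wraps the whole VIOS feed; each VIOS is fetched once.
        tx_mgr = pvm_tx.FeedTask('io_mappings', pvm_vios.VIOS.getter(
            adapter, xag=[pvm_vios.VIOS.xags.SCSI_MAPPING]))

        for elem in stg_elems:
            # Each subtask only mutates the in-memory VIOS wrapper; nothing
            # is sent to the server yet.
            def add_func(vios_w, elem=elem):
                mapping = tsk_map.build_vscsi_mapping(host_uuid, vios_w,
                                                      lpar_uuid, elem)
                return tsk_map.add_map(vios_w, mapping)
            tx_mgr.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)

        # A single execute runs all the queued subtasks and issues one update
        # per VIOS, instead of one GET/POST pair per mapping.
        tx_mgr.execute()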

Change-Id: Ibeaa8f796372f33595650a62237206b396946e62
Author: Drew Thorstensen
Date: 2015-08-11 21:13:12 -04:00
parent cb314320e1
commit a4a684443d

4 changed files with 117 additions and 34 deletions

nova_powervm/tests/virt/powervm/test_media.py

@@ -18,8 +18,10 @@ import mock
 from nova import test
 import os
+from pypowervm.tests import test_fixtures as pvm_fx
 from pypowervm.tests.wrappers.util import pvmhttp
 from pypowervm.wrappers import storage as pvm_stor
+from pypowervm.wrappers import virtual_io_server as pvm_vios
 from nova_powervm.tests.virt.powervm import fixtures as fx
 from nova_powervm.virt.powervm import exception as npvmex
@@ -101,8 +103,9 @@ class TestConfigDrivePowerVM(test.TestCase):
     @mock.patch('os.remove')
     @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
                 '_upload_vopt')
-    @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping')
-    def test_crt_cfg_drv_vopt(self, mock_add_map, mock_upld,
+    @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
+                '_attach_vopt')
+    def test_crt_cfg_drv_vopt(self, mock_attach, mock_upld,
                               mock_rm, mock_size, mock_validate, mock_cfg_iso):
         # Mock Returns
         mock_cfg_iso.return_value = '/tmp/cfgdrv/fake.iso', 'fake.iso'
@@ -114,7 +117,46 @@ class TestConfigDrivePowerVM(test.TestCase):
         cfg_dr_builder.create_cfg_drv_vopt(mock.MagicMock(), mock.MagicMock(),
                                            mock.MagicMock(), 'fake_lpar')
         self.assertTrue(mock_upld.called)
-        self.assertTrue(mock_add_map.called)
+        self.assertTrue(mock_attach.called)
+        mock_attach.assert_called_with(mock.ANY, 'fake_lpar', mock.ANY, None)
+
+    @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
+    @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
+    @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
+                '_validate_vopt_vg')
+    def test_attach_vopt(self, mock_validate, mock_build_map, mock_add_map):
+        # Set up a VIOS wrapper to act as the feed for FeedTaskFx and
+        # FeedTask.
+        feed = [pvm_vios.VIOS.wrap(self.vio_to_vg)]
+        ft_fx = pvm_fx.FeedTaskFx(feed)
+        self.useFixture(ft_fx)
+
+        mock_instance = mock.MagicMock(name='fake-instance')
+        cfg_dr_builder = m.ConfigDrivePowerVM(self.apt, 'fake_host')
+        cfg_dr_builder.vios_uuid = feed[0].uuid
+        vopt = mock.Mock()
+
+        self.apt.read.return_value = self.vio_to_vg
+
+        def validate_build(host_uuid, vios_w, lpar_uuid, vopt_elem):
+            self.assertEqual('fake_host', host_uuid)
+            self.assertIsInstance(vios_w, pvm_vios.VIOS)
+            self.assertEqual('lpar_uuid', lpar_uuid)
+            self.assertEqual(vopt, vopt_elem)
+            return 'map'
+        mock_build_map.side_effect = validate_build
+
+        def validate_add(vios_w, mapping):
+            self.assertIsInstance(vios_w, pvm_vios.VIOS)
+            self.assertEqual(mapping, 'map')
+            return 'added'
+        mock_add_map.side_effect = validate_add
+
+        cfg_dr_builder._attach_vopt(mock_instance, 'lpar_uuid', vopt)
+
+        # Make sure they were called and validated
+        self.assertEqual(1, mock_build_map.call_count)
+        self.assertEqual(1, mock_add_map.call_count)
+        self.assertEqual(1, ft_fx.patchers['update'].mock.call_count)
+
     @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
                 '_validate_vopt_vg')

nova_powervm/virt/powervm/driver.py

@@ -35,7 +35,6 @@ from oslo_utils import importutils
 import six
 from taskflow import engines as tf_eng
 from taskflow.patterns import linear_flow as tf_lf
-from taskflow.patterns import unordered_flow as tf_uf
 from pypowervm import adapter as pvm_apt
 from pypowervm import exceptions as pvm_exc
@@ -218,15 +217,11 @@ class PowerVMDriver(driver.ComputeDriver):
         flow_spawn.add(tf_vm.Create(self.adapter, self.host_wrapper, instance,
                                     flavor))

-        # Create a linear flow for the network.
-        flow_net = tf_lf.Flow("spawn-network")
-        flow_net.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
-                                     network_info, self.host_uuid))
-        flow_net.add(tf_net.PlugMgmtVif(self.adapter, instance,
-                                        self.host_uuid))
-
-        # Create a linear flow for the storage items.
-        flow_stor = tf_lf.Flow("spawn-storage")
+        # Create a flow for the IO
+        flow_spawn.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
+                                       network_info, self.host_uuid))
+        flow_spawn.add(tf_net.PlugMgmtVif(self.adapter, instance,
+                                          self.host_uuid))

         # Create the transaction manager (FeedTask) for Storage I/O.
         tx_mgr = vios.build_tx_feed_task(self.adapter, self.host_uuid)
@@ -234,13 +229,13 @@ class PowerVMDriver(driver.ComputeDriver):
         # Only add the image disk if this is from Glance.
         if not self._is_booted_from_volume(block_device_info):
             # Creates the boot image.
-            flow_stor.add(tf_stg.CreateDiskForImg(
+            flow_spawn.add(tf_stg.CreateDiskForImg(
                 self.disk_dvr, context, instance, image_meta,
                 disk_size=flavor.root_gb))

             # Connects up the disk to the LPAR
-            flow_stor.add(tf_stg.ConnectDisk(self.disk_dvr, context, instance,
-                                             tx_mgr))
+            flow_spawn.add(tf_stg.ConnectDisk(self.disk_dvr, context, instance,
+                                              tx_mgr))

         # Determine if there are volumes to connect. If so, add a connection
         # for each type.
@@ -253,21 +248,11 @@ class PowerVMDriver(driver.ComputeDriver):
                 # First connect the volume. This will update the
                 # connection_info.
-                flow_stor.add(tf_stg.ConnectVolume(vol_drv))
+                flow_spawn.add(tf_stg.ConnectVolume(vol_drv))

                 # Save the BDM so that the updated connection info is
                 # persisted.
-                flow_stor.add(tf_stg.SaveBDM(bdm, instance))
-
-        # Add the transaction manager flow to the end of the 'storage
-        # connection' tasks. This will run all the connections in parallel.
-        flow_stor.add(tx_mgr)
-
-        # The network and the storage flows can run in parallel. Create an
-        # unordered flow to run those.
-        flow_io = tf_uf.Flow("spawn-io")
-        flow_io.add(flow_net, flow_stor)
-        flow_spawn.add(flow_io)
+                flow_spawn.add(tf_stg.SaveBDM(bdm, instance))

         # If the config drive is needed, add those steps. Should be done
         # after all the other I/O.
@@ -276,6 +261,10 @@ class PowerVMDriver(driver.ComputeDriver):
             self.adapter, self.host_uuid, instance, injected_files,
-            network_info, admin_password))
+            network_info, admin_password, tx_mgr=tx_mgr))
+
+        # Add the transaction manager flow to the end of the 'I/O
+        # connection' tasks. This will run all the connections in parallel.
+        flow_spawn.add(tx_mgr)

         # Last step is to power on the system.
         flow_spawn.add(tf_vm.PowerOn(self.adapter, self.host_uuid, instance))
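
An aside on why flow_spawn.add(tx_mgr) is legal here: pypowervm's FeedTask is
itself a TaskFlow Task (to the best of my reading of pypowervm's transaction
module), so the engine runs the batched mapping updates as an ordinary step at
the end of the linear flow. A self-contained toy illustration of that
ordering, with stand-in tasks rather than the real ones:

    from taskflow import engines as tf_eng
    from taskflow import task as tf_task
    from taskflow.patterns import linear_flow as tf_lf


    class Step(tf_task.Task):
        """Stand-in for tasks like ConnectDisk or the tx_mgr FeedTask."""
        def execute(self):
            print('running %s' % self.name)


    flow = tf_lf.Flow('spawn')
    # The batched mapping updates run last, after every 'connect' task has
    # had a chance to queue its subtasks.
    flow.add(Step(name='connect_disk'), Step(name='cfg_drive'),
             Step(name='tx_mgr'))
    tf_eng.run(flow)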

nova_powervm/virt/powervm/media.py

@@ -28,6 +28,7 @@ from pypowervm import const as pvm_const
 from pypowervm.tasks import scsi_mapper as tsk_map
 from pypowervm.tasks import storage as tsk_stg
 from pypowervm import util as pvm_util
+from pypowervm.utils import transaction as pvm_tx
 from pypowervm.wrappers import base_partition as pvm_bp
 from pypowervm.wrappers import managed_system as pvm_ms
 from pypowervm.wrappers import storage as pvm_stg
@@ -106,7 +107,8 @@ class ConfigDrivePowerVM(object):
         return iso_path, file_name

     def create_cfg_drv_vopt(self, instance, injected_files, network_info,
-                            lpar_uuid, admin_pass=None, mgmt_cna=None):
+                            lpar_uuid, admin_pass=None, mgmt_cna=None,
+                            tx_mgr=None):
         """Creates the config drive virtual optical and attaches it to the VM.

         :param instance: The VM instance from OpenStack.
@@ -116,6 +118,11 @@ class ConfigDrivePowerVM(object):
         :param lpar_uuid: The UUID of the client LPAR
         :param admin_pass: (Optional) password to inject for the VM.
         :param mgmt_cna: (Optional) The management (RMC) CNA wrapper.
+        :param tx_mgr: (Optional) If provided, the storage mappings that
+                       connect the media to the VM are deferred onto this
+                       FeedTask, so they can all be executed later in one
+                       batched call. If None (the default), the media is
+                       attached immediately.
         """
         # If there is a management client network adapter, then we should
         # convert that to a VIF and add it to the network info
@@ -133,9 +140,47 @@ class ConfigDrivePowerVM(object):
             # Delete the media
             os.remove(iso_path)

-        # Add the mapping to the virtual machine
-        tsk_map.add_vscsi_mapping(self.host_uuid, self.vios_uuid, lpar_uuid,
-                                  vopt)
+        # Run the attach of the virtual optical
+        self._attach_vopt(instance, lpar_uuid, vopt, tx_mgr)
+
+    def _attach_vopt(self, instance, lpar_uuid, vopt, tx_mgr=None):
+        """Attaches the vOpt device to the VIOS.
+
+        If a tx_mgr is provided, the mapping is added to it, but the attach
+        does not occur until the tx_mgr is executed independently.
+
+        :param instance: The VM instance from OpenStack.
+        :param lpar_uuid: The UUID of the client LPAR
+        :param vopt: The virtual optical device to add.
+        :param tx_mgr: (Optional) If provided, the storage mappings that
+                       connect the media to the VM are deferred onto this
+                       FeedTask, so they can all be executed later in one
+                       batched call. If None (the default), the media is
+                       attached immediately.
+        """
+        # If no transaction manager was passed in, build a local WrapperTask
+        # so that the attach can run immediately.
+        if tx_mgr is None:
+            wtsk = pvm_tx.WrapperTask('media_attach', pvm_vios.VIOS.getter(
+                self.adapter, entry_uuid=self.vios_uuid,
+                xag=[pvm_vios.VIOS.xags.SCSI_MAPPING]))
+        else:
+            wtsk = tx_mgr.wrapper_tasks[self.vios_uuid]
+
+        # Define the function to build and add the mapping
+        def add_func(vios_w):
+            LOG.info(_LI("Adding cfg drive mapping for instance %(inst)s for "
+                         "Virtual I/O Server %(vios)s"),
+                     {'inst': instance.name, 'vios': vios_w.name})
+            mapping = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
+                                                  lpar_uuid, vopt)
+            return tsk_map.add_map(vios_w, mapping)
+        wtsk.add_functor_subtask(add_func)
+
+        # If the WrapperTask was built locally, execute it now.
+        if tx_mgr is None:
+            wtsk.execute()
+
     def _mgmt_cna_to_vif(self, cna):
         """Converts the mgmt CNA to VIF format for network injection."""

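For reference, a hypothetical caller-side sketch of the two attach modes that
create_cfg_drv_vopt now supports. The names adapter, host_uuid, instance,
injected_files, network_info, lpar_uuid, and tx_mgr are placeholders for the
driver's real values, not code from this change:

    from nova_powervm.virt.powervm import media

    media_builder = media.ConfigDrivePowerVM(adapter, host_uuid)

    # Immediate mode: no tx_mgr, so _attach_vopt builds a local WrapperTask
    # and executes it on the spot.
    media_builder.create_cfg_drv_vopt(instance, injected_files, network_info,
                                      lpar_uuid)

    # Deferred mode: the mapping subtask is queued on the FeedTask's
    # per-VIOS WrapperTask and only hits the VIOS when tx_mgr executes.
    media_builder.create_cfg_drv_vopt(instance, injected_files, network_info,
                                      lpar_uuid, tx_mgr=tx_mgr)
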
nova_powervm/virt/powervm/tasks/storage.py

@@ -323,7 +323,7 @@ class CreateAndConnectCfgDrive(task.Task):
     """The task to create the configuration drive."""

     def __init__(self, adapter, host_uuid, instance, injected_files,
-                 network_info, admin_pass):
+                 network_info, admin_pass, tx_mgr=None):
         """Create the Task that creates and connects the config drive.

         Requires the 'lpar_wrap' and 'mgmt_cna'
@@ -337,6 +337,12 @@ class CreateAndConnectCfgDrive(task.Task):
                                the ISO.
         :param network_info: The network_info from the nova spawn method.
         :param admin_pass: Optional password to inject for the VM.
+        :param tx_mgr: (Optional) The pypowervm transaction FeedTask for the
+                       I/O operations. If provided, the Virtual I/O Server
+                       mapping updates are added to the FeedTask, deferring
+                       them to a later point in time. If the FeedTask is not
+                       provided, the updates run immediately when this method
+                       is executed.
         """
         super(CreateAndConnectCfgDrive, self).__init__(
             name='cfg_drive', requires=['lpar_wrap', 'mgmt_cna'])
@@ -347,6 +353,7 @@ class CreateAndConnectCfgDrive(task.Task):
         self.network_info = network_info
         self.ad_pass = admin_pass
         self.mb = None
+        self.tx_mgr = tx_mgr

     def execute(self, lpar_wrap, mgmt_cna):
         LOG.info(_LI('Creating Config Drive for instance: %s'),
@@ -355,7 +362,7 @@ class CreateAndConnectCfgDrive(task.Task):
         self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
                                     self.network_info, lpar_wrap.uuid,
                                     admin_pass=self.ad_pass,
-                                    mgmt_cna=mgmt_cna)
+                                    mgmt_cna=mgmt_cna, tx_mgr=self.tx_mgr)

     def revert(self, lpar_wrap, mgmt_cna, result, flow_failures):
         # The parameters have to match the execute method, plus the response +