Remove vios module - use pypowervm.tasks.partition

Remove nova_powervm.virt.powervm.vios and replace its usages with calls
into the respective methods in pypowervm.tasks.partition.
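
In practice, callers switch from the in-tree helpers to their pypowervm
equivalents and drop the host_uuid argument the old helpers took. A minimal
before/after sketch of the pattern, based on the call sites changed below
(the build_storage_ftsk wrappers and the name='ssp' / xag values are
illustrative only, not real driver code):

# Before: nova_powervm.virt.powervm.vios helpers (removed by this change).
from nova_powervm.virt.powervm import vios
from pypowervm import const as pvm_const

def build_storage_ftsk_before(adapter, host_uuid):
    # Wait for VIOS RMC, then build the storage FeedTask.
    vios.validate_vios_ready(adapter, host_uuid)
    return vios.build_tx_feed_task(
        adapter, host_uuid, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

# After: the equivalent pypowervm.tasks.partition calls.
from pypowervm.tasks import partition as pvm_tpar

def build_storage_ftsk_after(adapter):
    pvm_tpar.validate_vios_ready(adapter)
    return pvm_tpar.build_active_vio_feed_task(
        adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])

Likewise, vios.get_physical_wwpns(adapter, host_uuid) becomes
pvm_tpar.get_physical_wwpns(adapter), and test mocks now patch
'pypowervm.tasks.partition.*' paths instead of
'nova_powervm.virt.powervm.vios.*'.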

Change-Id: I5fd3ffa3d55dc95f0e3ac6f6edc18caab92299b5
Eric Fried 2016-06-23 11:27:32 -05:00
parent 3bf7c05998
commit a200a01e71
11 changed files with 35 additions and 471 deletions

View File

@@ -109,7 +109,7 @@ class TestLocalDisk(test.TestCase):
self.assertEqual(3072.0, local.capacity_used)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_maps')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_disconnect_image_disk(self, mock_active_vioses, mock_rm_maps):
# vio_to_vg is a single-entry response. Wrap it and put it in a list
# to act as the feed for FeedTaskFx and FeedTask.
@@ -138,7 +138,7 @@ class TestLocalDisk(test.TestCase):
match_func=mock.ANY)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_maps')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_disconnect_image_disk_no_update(self, mock_active_vioses,
mock_rm_maps):
# vio_to_vg is a single-entry response. Wrap it and put it in a list
@@ -192,7 +192,7 @@ class TestLocalDisk(test.TestCase):
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_connect_image_disk(self, mock_active_vioses, mock_add_map,
mock_build_map):
# vio_to_vg is a single-entry response. Wrap it and put it in a list
@@ -221,7 +221,7 @@ class TestLocalDisk(test.TestCase):
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_connect_image_disk_no_update(self, mock_active_vioses,
mock_add_map, mock_build_map):
# vio_to_vg is a single-entry response. Wrap it and put it in a list

View File

@@ -401,7 +401,7 @@ class TestSSPDiskAdapter(test.TestCase):
'vios_uuids')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_connect_disk(self, mock_active_vioses, mock_add_map,
mock_build_map, mock_vio_uuids):
# vio is a single-entry response. Wrap it and put it in a list
@@ -432,7 +432,7 @@ class TestSSPDiskAdapter(test.TestCase):
'vios_uuids')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_connect_disk_no_update(self, mock_active_vioses, mock_add_map,
mock_build_map, mock_vio_uuids):
# vio is a single-entry response. Wrap it and put it in a list
@@ -471,7 +471,7 @@ class TestSSPDiskAdapter(test.TestCase):
@mock.patch('pypowervm.tasks.scsi_mapper.find_maps')
@mock.patch('pypowervm.tasks.scsi_mapper.remove_maps')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
-@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
+@mock.patch('pypowervm.tasks.partition.get_active_vioses')
def test_disconnect_disk(self, mock_active_vioses, mock_build_map,
mock_remove_maps, mock_find_maps, mock_vio_uuids):
# vio is a single-entry response. Wrap it and put it in a list

View File

@@ -93,15 +93,11 @@ class PowerVMComputeDriver(fixtures.Fixture):
# Mock active vios
self.get_active_vios = self.useFixture(fixtures.MockPatch(
-'nova_powervm.virt.powervm.vios.get_active_vioses')).mock
+'pypowervm.tasks.partition.get_active_vioses')).mock
self.get_active_vios.return_value = ['mock_vios']
-# Mock get inactive running vioses
-self.get_inactive_running_vioses = self.useFixture(
-fixtures.MockPatch(
-'nova_powervm.virt.powervm.vios.'
-'get_inactive_running_vioses')).mock
-self.get_inactive_running_vioses.return_value = []
+self.useFixture(fixtures.MockPatch(
+'pypowervm.tasks.partition.validate_vios_ready'))
self.drv.adapter.read.return_value = ms_http
self.drv.session = self.drv.adapter.session

View File

@@ -111,7 +111,7 @@ class TestPowerVMDriver(test.TestCase):
'nova_powervm.virt.powervm.vm.get_instance_wrapper')).mock
self.build_tx_feed = self.useFixture(fixtures.MockPatch(
-'nova_powervm.virt.powervm.vios.build_tx_feed_task')).mock
+'pypowervm.tasks.partition.build_active_vio_feed_task')).mock
self.stg_ftsk = pvm_tx.FeedTask('fake', pvm_vios.VIOS.getter(self.apt))
self.build_tx_feed.return_value = self.stg_ftsk
@@ -549,9 +549,9 @@ class TestPowerVMDriver(test.TestCase):
# Recreate uses all XAGs.
self.build_tx_feed.assert_called_once_with(
-self.drv.adapter, self.drv.host_uuid, xag={pvm_const.XAG.VIO_FMAP,
-pvm_const.XAG.VIO_STOR,
-pvm_const.XAG.VIO_SMAP})
+self.drv.adapter, xag={pvm_const.XAG.VIO_FMAP,
+pvm_const.XAG.VIO_STOR,
+pvm_const.XAG.VIO_SMAP})
# _vol_drv_iter gets called once in spawn itself, and once under
# _add_volume_connection_tasks.
# TODO(IBM): Find a way to make the call just once. Unless it's cheap.

View File

@@ -1,231 +0,0 @@
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import retrying
from nova import test
from pypowervm import const as pvm_const
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp
from pypowervm.wrappers import base_partition as pvm_bp
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.virt.powervm import exception as nova_pvm_exc
from nova_powervm.virt.powervm import vios
VIOS_FEED = 'fake_vios_feed2.txt'
class TestVios(test.TestCase):
def setUp(self):
super(TestVios, self).setUp()
self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
def resp(file_name):
return pvmhttp.load_pvm_resp(file_name).get_response()
self.vios_feed_resp = resp(VIOS_FEED)
def test_get_active_vioses(self):
self.adpt.read.return_value = self.vios_feed_resp
vioses = vios.get_active_vioses(self.adpt, 'host_uuid')
self.assertEqual(1, len(vioses))
vio = vioses[0]
self.assertEqual(pvm_bp.LPARState.RUNNING, vio.state)
self.assertEqual(pvm_bp.RMCState.ACTIVE, vio.rmc_state)
self.adpt.read.assert_called_with(pvm_vios.VIOS.schema_type, xag=None)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
def test_get_active_vioses_w_vios_wraps(self, mock_get):
mock_vios1 = mock.Mock(state='running', rmc_state='active')
mock_vios2 = mock.Mock(state='running', rmc_state='inactive')
vios_wraps = [mock_vios1, mock_vios2]
vioses = vios.get_active_vioses(
self.adpt, 'host_uuid', vios_wraps=vios_wraps)
self.assertEqual(1, len(vioses))
vio = vioses[0]
self.assertEqual(pvm_bp.LPARState.RUNNING, vio.state)
self.assertEqual(pvm_bp.RMCState.ACTIVE, vio.rmc_state)
self.assertEqual(0, mock_get.call_count)
def test_get_inactive_running_vioses(self):
# No
mock_vios1 = mock.Mock(
state=pvm_bp.LPARState.NOT_ACTIVATED,
rmc_state=pvm_bp.RMCState.INACTIVE)
# No
mock_vios2 = mock.Mock(
state=pvm_bp.LPARState.RUNNING,
rmc_state=pvm_bp.RMCState.BUSY)
# Yes
mock_vios3 = mock.Mock(
state=pvm_bp.LPARState.RUNNING,
rmc_state=pvm_bp.RMCState.UNKNOWN)
# No
mock_vios4 = mock.Mock(
state=pvm_bp.LPARState.UNKNOWN,
rmc_state=pvm_bp.RMCState.ACTIVE)
# No
mock_vios5 = mock.Mock(
state=pvm_bp.LPARState.RUNNING,
rmc_state=pvm_bp.RMCState.ACTIVE)
# Yes
mock_vios6 = mock.Mock(
state=pvm_bp.LPARState.RUNNING,
rmc_state=pvm_bp.RMCState.INACTIVE)
self.assertEqual(
{mock_vios6, mock_vios3}, set(vios.get_inactive_running_vioses(
[mock_vios1, mock_vios2, mock_vios3, mock_vios4,
mock_vios5, mock_vios6])))
mock_vios2 = mock.Mock()
def test_get_physical_wwpns(self):
self.adpt.read.return_value = self.vios_feed_resp
expected = set(['21000024FF649104'])
result = set(vios.get_physical_wwpns(self.adpt, 'fake_uuid'))
self.assertSetEqual(expected, result)
@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
@mock.patch('pypowervm.utils.transaction.FeedTask')
def test_build_tx_feed_task(self, mock_feed_task, mock_get_active_vioses):
mock_get_active_vioses.return_value = ['vios1', 'vios2']
mock_feed_task.return_value = 'mock_feed'
self.assertEqual('mock_feed',
vios.build_tx_feed_task('adpt', 'host_uuid'))
mock_get_active_vioses.assert_called_once_with(
'adpt', 'host_uuid',
xag=[pvm_const.XAG.VIO_STOR, pvm_const.XAG.VIO_SMAP,
pvm_const.XAG.VIO_FMAP])
@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
def test_build_tx_feed_task_w_empty_feed(self, mock_get_active_vioses):
mock_get_active_vioses.return_value = []
self.assertRaises(
nova_pvm_exc.NoActiveViosForFeedTask, vios.build_tx_feed_task,
mock.MagicMock(), mock.MagicMock())
@mock.patch('retrying.retry')
@mock.patch('nova_powervm.virt.powervm.vios.get_active_vioses')
@mock.patch('nova_powervm.virt.powervm.vios.get_inactive_running_vioses')
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
def test_is_vios_ready(self, mock_get, mock_get_inactive_vioses,
mock_get_active_vioses, mock_retry):
adpt = mock.MagicMock()
host_uuid = 'host_uuid'
# Validates the retry method itself.
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(5000, kwargs['wait_fixed'])
self.assertEqual(300000, kwargs['stop_max_delay'])
# Used to simulate an eventual timeout.
def retry_timeout(**kwargs):
# First validate the retry.
validate_retry(kwargs)
def one_running_inactive_vio():
mock_vios1 = mock.Mock()
mock_vios1.configure_mock(name='vios1')
return [mock_vios1]
def wrapped(_poll_for_dev):
return one_running_inactive_vio
return wrapped
# Validate that we will eventually timeout.
mock_retry.side_effect = retry_timeout
mock_get_active_vioses.return_value = [mock.Mock()]
# Shouldn't raise an error because we have active vioses
vios.validate_vios_ready(adpt, host_uuid)
# Should raise an exception now because we timed out and there
# weren't any active VIOSes
mock_get_active_vioses.return_value = []
self.assertRaises(
nova_pvm_exc.ViosNotAvailable, vios.validate_vios_ready, adpt,
host_uuid)
# Now test where we pass through to the actual method in the retry.
def retry_passthrough(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return _poll_for_dev
return wrapped
def get_active_vioses_side_effect(*args, **kwargs):
return kwargs['vios_wraps']
mock_retry.side_effect = retry_passthrough
# First run should succeed because all VIOSes should be active and
# running
mock_get.return_value = ['vios1', 'vios2', 'vios3', 'vios4']
mock_get_inactive_vioses.return_value = []
mock_get_active_vioses.side_effect = get_active_vioses_side_effect
vios.validate_vios_ready(adpt, host_uuid)
# Second run should fail because we raise an exception (which retries,
# and then eventually times out with no active VIOSes)
mock_get.reset_mock()
mock_get.side_effect = Exception('testing error')
mock_get_active_vioses.reset_mock()
mock_get_active_vioses.return_value = []
self.assertRaises(
nova_pvm_exc.ViosNotAvailable, vios.validate_vios_ready, adpt,
host_uuid)
# Last run should succeed but raise a warning because there's
# still inactive running VIOSes
mock_vios1, mock_vios2 = mock.Mock(), mock.Mock()
mock_vios1.configure_mock(name='vios1')
mock_vios2.configure_mock(name='vios2')
mock_get_inactive_vioses.return_value = [mock_vios1, mock_vios2]
mock_get_active_vioses.reset_mock()
mock_get_active_vioses.side_effect = ['vios1', 'vios2']
vios.validate_vios_ready(adpt, host_uuid)
def retry_exception(**kwargs):
validate_retry(kwargs)
def wrapped(func):
return raise_exception
def raise_exception():
raise retrying.RetryError('test retry error')
return wrapped
mock_get_active_vioses.reset_mock()
mock_retry.side_effect = retry_exception
mock_get_active_vioses.side_effect = [[], ['vios1', 'vios2']]
# Test failure when retry decorator fails out with no active VIOSes
self.assertRaises(
nova_pvm_exc.ViosNotAvailable, vios.validate_vios_ready, adpt,
host_uuid)
# Test success when retry decorator fails out with at least 1 active
# VIOS
vios.validate_vios_ready(adpt, host_uuid)

View File

@@ -449,7 +449,7 @@ class TestVSCSIAdapter(BaseVSCSITest):
self.assertEqual(0, mock_remove_maps.call_count)
self.assertEqual(0, self.ft_fx.patchers['update'].mock.call_count)
-@mock.patch('nova_powervm.virt.powervm.vios.get_physical_wwpns')
+@mock.patch('pypowervm.tasks.partition.get_physical_wwpns')
def test_wwpns(self, mock_vio_wwpns):
mock_vio_wwpns.return_value = ['aa', 'bb']

View File

@@ -20,6 +20,7 @@ from oslo_log import log as logging
from nova import exception as nova_exc
from pypowervm import const as pvm_const
from pypowervm import exceptions as pvm_exc
+from pypowervm.tasks import partition as pvm_tpar
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.tasks import storage as tsk_stg
from pypowervm.wrappers import managed_system as pvm_ms
@@ -31,7 +32,6 @@ from nova_powervm.virt.powervm.disk import driver as disk_dvr
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm import vios
from nova_powervm.virt.powervm import vm
@@ -123,9 +123,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Ensure we have a transaction manager.
if stg_ftsk is None:
-stg_ftsk = vios.build_tx_feed_task(
-self.adapter, self.host_uuid, name='localdisk',
-xag=[pvm_const.XAG.VIO_SMAP])
+stg_ftsk = pvm_tpar.build_active_vio_feed_task(
+self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])
# Build the match function
match_func = tsk_map.gen_match_func(pvm_stg.VDisk, prefixes=disk_type)
@@ -221,9 +220,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Ensure we have a transaction manager.
if stg_ftsk is None:
-stg_ftsk = vios.build_tx_feed_task(
-self.adapter, self.host_uuid, name='localdisk',
-xag=[pvm_const.XAG.VIO_SMAP])
+stg_ftsk = pvm_tpar.build_active_vio_feed_task(
+self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])
def add_func(vios_w):
LOG.info(_LI("Adding logical volume disk connection between VM "

View File

@@ -23,11 +23,11 @@ from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm import vios
from nova_powervm.virt.powervm import vm
import pypowervm.const as pvm_const
from pypowervm.tasks import cluster_ssp as tsk_cs
+from pypowervm.tasks import partition as tsk_par
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.tasks import storage as tsk_stg
import pypowervm.util as pvm_u
@@ -133,9 +133,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
disconnected from the I/O Server and VM.
"""
if stg_ftsk is None:
-stg_ftsk = vios.build_tx_feed_task(
-self.adapter, self.host_uuid, name='ssp',
-xag=[pvm_const.XAG.VIO_SMAP])
+stg_ftsk = tsk_par.build_active_vio_feed_task(
+self.adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
lpar_uuid = vm.get_pvm_uuid(instance)
match_func = tsk_map.gen_match_func(pvm_stg.LU, prefixes=disk_type)
@@ -265,9 +264,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
immediately when this method is executed.
"""
if stg_ftsk is None:
-stg_ftsk = vios.build_tx_feed_task(
-self.adapter, self.host_uuid, name='ssp',
-xag=[pvm_const.XAG.VIO_SMAP])
+stg_ftsk = tsk_par.build_active_vio_feed_task(
+self.adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
# Create the LU structure
lu = pvm_stg.LU.bld_ref(self.adapter, disk_info.name, disk_info.udid)

View File

@@ -65,7 +65,6 @@ from nova_powervm.virt.powervm.tasks import network as tf_net
from nova_powervm.virt.powervm.tasks import slot as tf_slot
from nova_powervm.virt.powervm.tasks import storage as tf_stg
from nova_powervm.virt.powervm.tasks import vm as tf_vm
-from nova_powervm.virt.powervm import vios
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm import volume as vol_attach
@@ -128,7 +127,7 @@ class PowerVMDriver(driver.ComputeDriver):
LOG.debug("Driver found compute partition UUID of: %s" % self.mp_uuid)
# Make sure the Virtual I/O Server(s) are available.
-vios.validate_vios_ready(self.adapter, self.host_uuid)
+pvm_par.validate_vios_ready(self.adapter)
# Initialize the disk adapter. Sets self.disk_dvr
self._get_disk_adapter()
@@ -391,8 +390,7 @@ class PowerVMDriver(driver.ComputeDriver):
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms, recreate=recreate)
-stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
-xag=xag)
+stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter, xag=xag)
# Build the PowerVM Slot lookup map. Only the recreate action needs
# the volume driver iterator (to look up volumes and their client
@@ -576,8 +574,8 @@ class PowerVMDriver(driver.ComputeDriver):
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
-stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
-xag=xag)
+stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter,
+xag=xag)
# Build the PowerVM Slot lookup map.
slot_mgr = slot.build_slot_mgr(instance, self.store_api)
@@ -1173,8 +1171,8 @@ class PowerVMDriver(driver.ComputeDriver):
if bdms:
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
-stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
-xag=xag)
+stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter,
+xag=xag)
# Get the slot map. This is so we build the client
# adapters in the same slots.
@@ -1271,8 +1269,8 @@ class PowerVMDriver(driver.ComputeDriver):
if bdms or not same_host:
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
-stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
-xag=xag)
+stg_ftsk = pvm_par.build_active_vio_feed_task(self.adapter,
+xag=xag)
# We need the slot manager
# a) If migrating to a different host: to restore the proper slots;
# b) If adding/removing block devices, to register the slots.

View File

@@ -1,194 +0,0 @@
# Copyright 2015, 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import retrying
from pypowervm import const as pvm_const
from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import base_partition as pvm_bp
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as nova_pvm_exc
from nova_powervm.virt.powervm.i18n import _LW
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# RMC must be either active or busy. Busy is allowed because that simply
# means that something is running against the VIOS at the moment...but
# it should recover shortly.
VALID_RMC_STATES = [pvm_bp.RMCState.ACTIVE, pvm_bp.RMCState.BUSY]
# Only a running state is OK for now.
VALID_VM_STATES = [pvm_bp.LPARState.RUNNING]
def get_active_vioses(adapter, host_uuid, xag=None, vios_wraps=None):
"""Returns a list of active Virtual I/O Server Wrappers for a host.
Active is defined by powered on and RMC state being 'active'.
:param adapter: The pypowervm adapter for the query.
:param host_uuid: The host server's UUID (not used).
:param xag: (Optional, Default: None) List of extended attributes to use.
:param vios_wraps: (Optional, Default: None) A list of VIOS wrappers. If
specified, the method will check for active VIOSes
in this list instead of issuing a GET.
:return: List of VIOS wrappers.
"""
if not vios_wraps:
vios_wraps = pvm_vios.VIOS.get(adapter, xag=xag)
return [vio for vio in vios_wraps if is_vios_active(vio)]
def get_inactive_running_vioses(vios_wraps):
"""Method to get RMC inactive but powered on VIOSes
Not waiting for VIOS RMC states to go active when the host boots up
may result in stale adapter mappings from evacuated instances.
:param vios_wraps: A list of VIOS wrappers.
:return: List of RMC inactive but powered on VIOSes from the list.
"""
inactive_running_vioses = []
for vios in vios_wraps:
if (vios.rmc_state not in VALID_RMC_STATES and
vios.state not in [pvm_bp.LPARState.NOT_ACTIVATED,
pvm_bp.LPARState.ERROR,
pvm_bp.LPARState.NOT_AVAILBLE,
pvm_bp.LPARState.SHUTTING_DOWN,
pvm_bp.LPARState.SUSPENDED,
pvm_bp.LPARState.SUSPENDING,
pvm_bp.LPARState.UNKNOWN]):
inactive_running_vioses.append(vios)
return inactive_running_vioses
def is_vios_active(vios):
"""Returns a boolean to indicate if the VIOS is active.
Active is defined by running, and the RMC being 'active'.
:param vios: The Virtual I/O Server wrapper to validate.
:return: Boolean
"""
return (vios.rmc_state in VALID_RMC_STATES and
vios.state in VALID_VM_STATES)
def get_physical_wwpns(adapter, ms_uuid):
"""Returns the active WWPNs of the FC ports across all VIOSes on system."""
resp = adapter.read(pvm_ms.System.schema_type, root_id=ms_uuid,
child_type=pvm_vios.VIOS.schema_type,
xag=[pvm_const.XAG.VIO_STOR])
vios_feed = pvm_vios.VIOS.wrap(resp)
wwpn_list = []
for vios in vios_feed:
wwpn_list.extend(vios.get_active_pfc_wwpns())
return wwpn_list
def build_tx_feed_task(adapter, host_uuid, name='vio_feed_mgr',
xag=[pvm_const.XAG.VIO_STOR,
pvm_const.XAG.VIO_SMAP,
pvm_const.XAG.VIO_FMAP]):
"""Builds the pypowervm transaction FeedTask.
The transaction FeedTask enables users to collect a set of
'WrapperTasks' against a feed of entities (in this case a set of VIOSes).
The WrapperTask (within the FeedTask) handles lock and retry.
This is useful to batch together a set of updates across a feed of elements
(and multiple updates within a given wrapper). This allows for significant
performance improvements.
:param adapter: The pypowervm adapter for the query.
:param host_uuid: The host server's UUID.
:param name: (Optional) The name of the feed manager. Defaults to
vio_feed_mgr.
:param xag: (Optional) List of extended attributes to use. If not passed
in defaults to all storage options (as this is most common
case for using a transaction manager).
"""
active_vio_feed = get_active_vioses(adapter, host_uuid, xag=xag)
if not active_vio_feed:
raise nova_pvm_exc.NoActiveViosForFeedTask()
return pvm_tx.FeedTask(name, active_vio_feed)
def validate_vios_ready(adapter, host_uuid):
"""Check whether VIOS rmc is up and running on this host.
Will query the VIOSes for a period of time attempting to ensure all
running VIOSes get an active RMC. If no VIOSes are ready by the timeout
it will raise an exception. If only some of the VIOSes had RMC go active
by the end of the wait period host initialization will continue.
The timeout is defined by the vios_active_wait_timeout conf option.
:param adapter: The pypowervm adapter for the query.
:param host_uuid: The host server's UUID.
:raises: A ViosNotAvailable exception if a VIOS is not available by a
given timeout.
"""
max_wait_time = CONF.powervm.vios_active_wait_timeout
# Used to keep track of VIOSes and reduce queries to API
vios_wraps = []
rmc_down_vioses = []
@retrying.retry(retry_on_result=lambda result: len(result) > 0,
wait_fixed=5 * 1000,
stop_max_delay=max_wait_time * 1000)
def _wait_for_active_vioses():
try:
# Update the wrappers list and get the list of inactive
# running VIOSes
del vios_wraps[:]
vios_wraps.extend(pvm_vios.VIOS.get(adapter))
return get_inactive_running_vioses(vios_wraps)
except Exception as e:
LOG.exception(e)
# If we errored then we want to keep retrying so return something
# with a length greater than zero
return [None]
try:
rmc_down_vioses = _wait_for_active_vioses()
except retrying.RetryError:
# This is thrown if we've hit our max retry count. If so, no
# issue... just continue
pass
if len(rmc_down_vioses) > 0 and rmc_down_vioses != [None]:
LOG.warning(
_LW('Timed out waiting for the RMC state of all the powered '
'on Virtual I/O Servers to be active. Wait time was: '
'%(time)s seconds. VIOSes that did not go active were: '
'%(vioses)s.'),
{'time': max_wait_time,
'vioses': ', '.join([
vio.name for vio in rmc_down_vioses if vio is not None])})
# If we didn't get a single active VIOS then raise an exception
if not get_active_vioses(adapter, host_uuid, vios_wraps=vios_wraps):
raise nova_pvm_exc.ViosNotAvailable(wait_time=max_wait_time)

View File

@@ -24,13 +24,13 @@ from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm.i18n import _LW
-from nova_powervm.virt.powervm import vios
from nova_powervm.virt.powervm import vm
from nova_powervm.virt.powervm.volume import driver as v_driver
from pypowervm import const as pvm_const
from pypowervm.tasks import client_storage as pvm_c_stor
from pypowervm.tasks import hdisk
+from pypowervm.tasks import partition as pvm_tpar
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.utils import transaction as tx
from pypowervm.wrappers import storage as pvm_stor
@@ -509,8 +509,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
# Use a global variable so this is pulled once when the process starts.
global _vscsi_pfc_wwpns
if _vscsi_pfc_wwpns is None:
-_vscsi_pfc_wwpns = vios.get_physical_wwpns(self.adapter,
-self.host_uuid)
+_vscsi_pfc_wwpns = pvm_tpar.get_physical_wwpns(self.adapter)
return _vscsi_pfc_wwpns
def host_name(self):