Refactor validate vopt media repo to pypowervm
The pypowervm library recently refactored the validate vopt logic into itself. This removes the need to have it in nova_powervm. This change set moves the logic out of nova_powervm and uses it from pypowervm instead. Change-Id: I49ccd7a6e6fe5128e6ee02e0161aac852284eae3
This commit is contained in:
parent
025c26d8ab
commit
66a9c171fc
|
@ -123,6 +123,10 @@ class TestPowerVMDriver(test.TestCase):
|
|||
'pypowervm.util.sanitize_partition_name_for_api')).mock
|
||||
self.san_lpar_name.side_effect = lambda name: name
|
||||
|
||||
self.validate_vopt = self.useFixture(fixtures.MockPatch(
|
||||
'pypowervm.tasks.vopt.validate_vopt_repo_exists')).mock
|
||||
self.validate_vopt.return_value = None, None
|
||||
|
||||
# Create an instance to test with
|
||||
self.inst = objects.Instance(**powervm.TEST_INST_SPAWNING)
|
||||
self.inst_ibmi = objects.Instance(**powervm.TEST_INST_SPAWNING)
|
||||
|
@ -309,14 +313,11 @@ class TestPowerVMDriver(test.TestCase):
|
|||
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'create_cfg_drv_vopt')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('nova.virt.configdrive.required_by')
|
||||
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
|
||||
@mock.patch('pypowervm.tasks.power.power_on')
|
||||
def test_spawn_with_cfg(self, mock_pwron, mock_get_flv, mock_cfg_drv,
|
||||
mock_val_vopt, mock_cfg_vopt, mock_plug_vifs,
|
||||
mock_plug_mgmt_vif):
|
||||
mock_cfg_vopt, mock_plug_vifs, mock_plug_mgmt_vif):
|
||||
"""Validates the PowerVM spawn w/ config drive operations."""
|
||||
# Set up the mocks to the tasks.
|
||||
mock_get_flv.return_value = self.inst.get_flavor()
|
||||
|
@ -331,8 +332,8 @@ class TestPowerVMDriver(test.TestCase):
|
|||
self.inst, self.inst.get_flavor(),
|
||||
nvram=None)
|
||||
# Config drive was called
|
||||
self.assertTrue(mock_val_vopt.called)
|
||||
self.assertTrue(mock_cfg_vopt.called)
|
||||
self.assertTrue(self.validate_vopt.called)
|
||||
|
||||
# Power on was called
|
||||
self.assertTrue(mock_pwron.called)
|
||||
|
@ -897,15 +898,12 @@ class TestPowerVMDriver(test.TestCase):
|
|||
@mock.patch('nova_powervm.virt.powervm.vm.power_off')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'dlt_vopt')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
|
||||
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
|
||||
@mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr')
|
||||
def test_destroy_internal(self, mock_bld_slot_mgr, mock_get_flv,
|
||||
mock_pvmuuid, mock_val_vopt, mock_dlt_vopt,
|
||||
mock_pwroff, mock_dlt, mock_boot_from_vol,
|
||||
mock_unplug_vifs):
|
||||
mock_pvmuuid, mock_dlt_vopt, mock_pwroff,
|
||||
mock_dlt, mock_boot_from_vol, mock_unplug_vifs):
|
||||
"""Validates the basic PowerVM destroy."""
|
||||
# NVRAM Manager
|
||||
self.drv.nvram_mgr = mock.Mock()
|
||||
|
@ -1051,16 +1049,13 @@ class TestPowerVMDriver(test.TestCase):
|
|||
@mock.patch('nova_powervm.virt.powervm.vm.power_off')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'dlt_vopt')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
|
||||
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
|
||||
@mock.patch('nova_powervm.virt.powervm.slot.build_slot_mgr')
|
||||
def test_destroy_internal_no_nvram_cleanup(self, mock_bld_slot_mgr,
|
||||
mock_get_flv, mock_pvmuuid,
|
||||
mock_val_vopt, mock_dlt_vopt,
|
||||
mock_pwroff, mock_dlt,
|
||||
mock_boot_from_vol,
|
||||
mock_dlt_vopt, mock_pwroff,
|
||||
mock_dlt, mock_boot_from_vol,
|
||||
mock_unplug_vifs):
|
||||
"""Validates the basic PowerVM destroy, without NVRAM cleanup.
|
||||
|
||||
|
@ -1212,12 +1207,9 @@ class TestPowerVMDriver(test.TestCase):
|
|||
@mock.patch('nova_powervm.virt.powervm.vm.power_off')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'dlt_vopt')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
|
||||
def test_destroy_rollback(self, mock_get_flv, mock_val_vopt,
|
||||
mock_dlt_vopt, mock_pwroff, mock_dlt,
|
||||
mock_unplug_vifs):
|
||||
def test_destroy_rollback(self, mock_get_flv, mock_dlt_vopt,
|
||||
mock_pwroff, mock_dlt, mock_unplug_vifs):
|
||||
"""Validates the basic PowerVM destroy rollback mechanism works."""
|
||||
# Set up the mocks to the tasks.
|
||||
mock_get_flv.return_value = self.inst.get_flavor()
|
||||
|
|
|
@ -14,6 +14,9 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
import fixtures
|
||||
import mock
|
||||
|
||||
from nova import test
|
||||
|
@ -21,7 +24,6 @@ from pypowervm.tests import test_fixtures as pvm_fx
|
|||
from pypowervm.wrappers import storage as pvm_stg
|
||||
from pypowervm.wrappers import virtual_io_server as pvm_vios
|
||||
|
||||
from nova_powervm.virt.powervm import exception as npvmex
|
||||
from nova_powervm.virt.powervm import media as m
|
||||
|
||||
|
||||
|
@ -33,16 +35,13 @@ class TestConfigDrivePowerVM(test.TestCase):
|
|||
|
||||
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
|
||||
|
||||
# Wipe out the static variables, so that the revalidate is called
|
||||
m.ConfigDrivePowerVM._cur_vios_uuid = None
|
||||
m.ConfigDrivePowerVM._cur_vios_name = None
|
||||
m.ConfigDrivePowerVM._cur_vg_uuid = None
|
||||
self.validate_vopt = self.useFixture(fixtures.MockPatch(
|
||||
'pypowervm.tasks.vopt.validate_vopt_repo_exists')).mock
|
||||
self.validate_vopt.return_value = None, None
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('nova.api.metadata.base.InstanceMetadata')
|
||||
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
|
||||
def test_crt_cfg_dr_iso(self, mock_mkdrv, mock_meta, mock_vopt_valid):
|
||||
def test_crt_cfg_dr_iso(self, mock_mkdrv, mock_meta):
|
||||
"""Validates that the image creation method works."""
|
||||
cfg_dr_builder = m.ConfigDrivePowerVM(self.apt, 'host_uuid')
|
||||
mock_instance = mock.MagicMock()
|
||||
|
@ -63,19 +62,18 @@ class TestConfigDrivePowerVM(test.TestCase):
|
|||
self.assertEqual('cfg_fake_instance_with_name_that_.iso', file_name)
|
||||
self.assertEqual('/tmp/cfgdrv/cfg_fake_instance_with_name_that_.iso',
|
||||
iso_path)
|
||||
self.assertTrue(self.validate_vopt.called)
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_create_cfg_dr_iso')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('os.path.getsize')
|
||||
@mock.patch('os.remove')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_upload_vopt')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_attach_vopt')
|
||||
def test_crt_cfg_drv_vopt(self, mock_attach, mock_upld,
|
||||
mock_rm, mock_size, mock_validate, mock_cfg_iso):
|
||||
def test_crt_cfg_drv_vopt(self, mock_attach, mock_upld, mock_rm, mock_size,
|
||||
mock_cfg_iso):
|
||||
# Mock Returns
|
||||
mock_cfg_iso.return_value = '/tmp/cfgdrv/fake.iso', 'fake.iso'
|
||||
mock_size.return_value = 10000
|
||||
|
@ -88,14 +86,13 @@ class TestConfigDrivePowerVM(test.TestCase):
|
|||
self.assertTrue(mock_upld.called)
|
||||
self.assertTrue(mock_attach.called)
|
||||
mock_attach.assert_called_with(mock.ANY, 'fake_lpar', mock.ANY, None)
|
||||
self.assertTrue(self.validate_vopt.called)
|
||||
|
||||
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
|
||||
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
@mock.patch('pypowervm.utils.transaction.WrapperTask')
|
||||
def test_attach_vopt(self, mock_class_wrapper_task, mock_validate,
|
||||
mock_build_map, mock_add_map):
|
||||
def test_attach_vopt(self, mock_class_wrapper_task, mock_build_map,
|
||||
mock_add_map):
|
||||
# Create objects to test with
|
||||
mock_instance = mock.MagicMock(name='fake-instance')
|
||||
cfg_dr_builder = m.ConfigDrivePowerVM(self.apt, 'fake_host')
|
||||
|
@ -132,10 +129,9 @@ class TestConfigDrivePowerVM(test.TestCase):
|
|||
self.assertTrue(mock_wrapper_task.execute.called)
|
||||
self.assertEqual(1, mock_build_map.call_count)
|
||||
self.assertEqual(1, mock_add_map.call_count)
|
||||
self.assertTrue(self.validate_vopt.called)
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg')
|
||||
def test_mgmt_cna_to_vif(self, mock_validate):
|
||||
def test_mgmt_cna_to_vif(self):
|
||||
mock_cna = mock.MagicMock()
|
||||
mock_cna.mac = "FAD4433ED120"
|
||||
|
||||
|
@ -167,176 +163,6 @@ class TestConfigDrivePowerVM(test.TestCase):
|
|||
self.assertEqual('fe80::fdff:ffff:feff:ffff',
|
||||
m.ConfigDrivePowerVM._mac_to_link_local(mac))
|
||||
|
||||
@mock.patch('pypowervm.wrappers.storage.VG.get')
|
||||
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
|
||||
def test_validate_vopt_vg1(self, mock_vios_get, mock_vg_get):
|
||||
"""One VIOS, rootvg found; locals are set."""
|
||||
# Init objects to test with
|
||||
mock_vg = mock.Mock()
|
||||
mock_vg.configure_mock(name='rootvg',
|
||||
uuid='1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f',
|
||||
vmedia_repos=['repo'])
|
||||
mock_vg_get.return_value = [mock_vg]
|
||||
mock_vios = mock.Mock()
|
||||
mock_vios.configure_mock(name='the_vios', uuid='vios_uuid',
|
||||
rmc_state='active')
|
||||
mock_vios_get.return_value = [mock_vios]
|
||||
|
||||
# Run
|
||||
cfg_dr_builder = m.ConfigDrivePowerVM(self.apt, 'fake_host')
|
||||
|
||||
# Validate
|
||||
self.assertEqual('1e46bbfd-73b6-3c2a-aeab-a1d3f065e92f',
|
||||
cfg_dr_builder.vg_uuid)
|
||||
self.assertEqual('the_vios', cfg_dr_builder.vios_name)
|
||||
self.assertEqual('vios_uuid', cfg_dr_builder.vios_uuid)
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'__init__', new=mock.MagicMock(return_value=None))
|
||||
def _mock_cfg_dr_no_validate(self):
|
||||
"""Mock ConfigDrivePowerVM without running _validate_vopt_vg."""
|
||||
cfg_dr = m.ConfigDrivePowerVM(self.apt, 'fake_host')
|
||||
cfg_dr.adapter = self.apt
|
||||
cfg_dr.host_uuid = 'fake_host'
|
||||
return cfg_dr
|
||||
|
||||
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
|
||||
@mock.patch('pypowervm.wrappers.storage.VG.get')
|
||||
def test_validate_vopt_vg2(self, mock_vg_get, mock_vios_get):
|
||||
"""Dual VIOS, first is inactive; statics are set."""
|
||||
# Init objects to test with
|
||||
cfg_dr = self._mock_cfg_dr_no_validate()
|
||||
vwrap1 = mock.Mock(rmc_state='#busy')
|
||||
vwrap2 = mock.Mock()
|
||||
vwrap2.configure_mock(name='vname', rmc_state='active', uuid='vio_id')
|
||||
mock_vios_get.return_value = [vwrap1, vwrap2]
|
||||
vg_wrap = mock.Mock()
|
||||
vg_wrap.configure_mock(name='rootvg', vmedia_repos=[1], uuid='vg_uuid')
|
||||
mock_vg_get.return_value = [vg_wrap]
|
||||
|
||||
# Run
|
||||
cfg_dr._validate_vopt_vg()
|
||||
|
||||
# Validate
|
||||
self.assertEqual('vg_uuid', cfg_dr._cur_vg_uuid)
|
||||
self.assertEqual('vio_id', cfg_dr._cur_vios_uuid)
|
||||
self.assertEqual('vname', cfg_dr._cur_vios_name)
|
||||
|
||||
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
|
||||
@mock.patch('pypowervm.wrappers.storage.VG.get')
|
||||
@mock.patch('pypowervm.wrappers.storage.VMediaRepos.bld')
|
||||
def test_validate_vopt_vg3(self, mock_vmr_bld, mock_vg_get, mock_vios_get):
|
||||
"""Dual VIOS, multiple VGs, repos on non-rootvg."""
|
||||
cfg_dr = self._mock_cfg_dr_no_validate()
|
||||
vwrap1 = mock.Mock()
|
||||
vwrap1.configure_mock(name='vio1', rmc_state='active', uuid='vio_id1')
|
||||
vwrap2 = mock.Mock()
|
||||
vwrap2.configure_mock(name='vio2', rmc_state='active', uuid='vio_id2')
|
||||
mock_vios_get.return_value = [vwrap1, vwrap2]
|
||||
vg1 = mock.Mock()
|
||||
vg1.configure_mock(name='rootvg', vmedia_repos=[], uuid='vg1')
|
||||
vg2 = mock.Mock()
|
||||
vg2.configure_mock(name='other1vg', vmedia_repos=[], uuid='vg2')
|
||||
vg3 = mock.Mock()
|
||||
vg3.configure_mock(name='rootvg', vmedia_repos=[], uuid='vg3')
|
||||
vg4 = mock.Mock()
|
||||
vg4.configure_mock(name='other2vg', vmedia_repos=[1], uuid='vg4')
|
||||
|
||||
# 1: Find the media repos on non-rootvg on the second VIOS
|
||||
mock_vg_get.side_effect = [[vg1, vg2], [vg3, vg4]]
|
||||
|
||||
cfg_dr._validate_vopt_vg()
|
||||
|
||||
# Found the repos on VIOS 2, VG 2
|
||||
self.assertEqual('vg4', cfg_dr._cur_vg_uuid)
|
||||
self.assertEqual('vio_id2', cfg_dr._cur_vios_uuid)
|
||||
self.assertEqual('vio2', cfg_dr._cur_vios_name)
|
||||
|
||||
mock_vios_get.reset_mock()
|
||||
mock_vg_get.reset_mock()
|
||||
|
||||
# 2: At this point, the statics are set. If we validate again, and the
|
||||
# VG.get returns the right one, we should bail out early.
|
||||
mock_vg_get.side_effect = None
|
||||
mock_vg_get.return_value = vg4
|
||||
|
||||
cfg_dr._validate_vopt_vg()
|
||||
|
||||
# Statics unchanged
|
||||
self.assertEqual('vg4', cfg_dr._cur_vg_uuid)
|
||||
self.assertEqual('vio_id2', cfg_dr._cur_vios_uuid)
|
||||
self.assertEqual('vio2', cfg_dr._cur_vios_name)
|
||||
# We didn't have to query the VIOS
|
||||
mock_vios_get.assert_not_called()
|
||||
# We only did VG.get once
|
||||
self.assertEqual(1, mock_vg_get.call_count)
|
||||
|
||||
mock_vg_get.reset_mock()
|
||||
|
||||
# 3: Same again, but this time the repos is somewhere else. We should
|
||||
# find it.
|
||||
vg4.vmedia_repos = []
|
||||
vg2.vmedia_repos = [1]
|
||||
# The first VG.get is looking for the already-set repos. The second
|
||||
# will be the feed from the first VIOS. There should be no third call,
|
||||
# since we should find the repos on VIOS 2.
|
||||
mock_vg_get.side_effect = [vg4, [vg1, vg2]]
|
||||
|
||||
cfg_dr._validate_vopt_vg()
|
||||
|
||||
self.assertEqual('vg2', cfg_dr._cur_vg_uuid)
|
||||
self.assertEqual('vio_id1', cfg_dr._cur_vios_uuid)
|
||||
self.assertEqual('vio1', cfg_dr._cur_vios_name)
|
||||
|
||||
mock_vg_get.reset_mock()
|
||||
mock_vios_get.reset_mock()
|
||||
|
||||
# 4: No repository anywhere - need to create one. The default VG name
|
||||
# (rootvg) exists in multiple places. Ensure we create in the first
|
||||
# one, for efficiency.
|
||||
vg2.vmedia_repos = []
|
||||
mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
|
||||
vg1.update.return_value = vg1
|
||||
|
||||
cfg_dr._validate_vopt_vg()
|
||||
|
||||
self.assertEqual('vg1', cfg_dr._cur_vg_uuid)
|
||||
self.assertEqual('vio_id1', cfg_dr._cur_vios_uuid)
|
||||
self.assertEqual('vio1', cfg_dr._cur_vios_name)
|
||||
self.assertEqual([mock_vmr_bld.return_value], vg1.vmedia_repos)
|
||||
|
||||
mock_vg_get.reset_mock()
|
||||
mock_vios_get.reset_mock()
|
||||
vg1.update.reset_mock()
|
||||
|
||||
# 5: No repos - need to create. Make sure conf setting is honored.
|
||||
vg1.vmedia_repos = []
|
||||
self.flags(vopt_media_volume_group='other2vg', group='powervm')
|
||||
|
||||
mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
|
||||
vg4.update.return_value = vg4
|
||||
|
||||
cfg_dr._validate_vopt_vg()
|
||||
|
||||
self.assertEqual('vg4', cfg_dr._cur_vg_uuid)
|
||||
self.assertEqual('vio_id2', cfg_dr._cur_vios_uuid)
|
||||
self.assertEqual('vio2', cfg_dr._cur_vios_name)
|
||||
self.assertEqual([mock_vmr_bld.return_value], vg4.vmedia_repos)
|
||||
vg1.update.assert_not_called()
|
||||
|
||||
mock_vg_get.reset_mock()
|
||||
mock_vios_get.reset_mock()
|
||||
|
||||
# 6: No repos, and a configured VG name that doesn't exist
|
||||
vg4.vmedia_repos = []
|
||||
self.flags(vopt_media_volume_group='mythicalvg', group='powervm')
|
||||
mock_vg_get.side_effect = [vg1, [vg1, vg2], [vg3, vg4]]
|
||||
|
||||
self.assertRaises(npvmex.NoMediaRepoVolumeGroupFound,
|
||||
cfg_dr._validate_vopt_vg)
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg', new=mock.MagicMock())
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'add_dlt_vopt_tasks')
|
||||
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.wrap',
|
||||
|
@ -358,8 +184,6 @@ class TestConfigDrivePowerVM(test.TestCase):
|
|||
'2', mock_feed_task, remove_mappings=False)
|
||||
self.assertTrue(mock_feed_task.execute.called)
|
||||
|
||||
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
|
||||
'_validate_vopt_vg', new=mock.MagicMock())
|
||||
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_id',
|
||||
new=mock.MagicMock(return_value='2'))
|
||||
@mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func')
|
||||
|
|
|
@ -21,22 +21,19 @@ from nova.virt import configdrive
|
|||
import os
|
||||
from taskflow import task
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_log import log as logging
|
||||
|
||||
from pypowervm import const as pvm_const
|
||||
from pypowervm.tasks import scsi_mapper as tsk_map
|
||||
from pypowervm.tasks import storage as tsk_stg
|
||||
from pypowervm.tasks import vopt as tsk_vopt
|
||||
from pypowervm import util as pvm_util
|
||||
from pypowervm.utils import transaction as pvm_tx
|
||||
from pypowervm.wrappers import base_partition as pvm_bp
|
||||
from pypowervm.wrappers import storage as pvm_stg
|
||||
from pypowervm.wrappers import virtual_io_server as pvm_vios
|
||||
|
||||
from nova_powervm import conf as cfg
|
||||
from nova_powervm.virt.powervm import exception as npvmex
|
||||
from nova_powervm.virt.powervm.i18n import _LI
|
||||
from nova_powervm.virt.powervm.i18n import _LW
|
||||
from nova_powervm.virt.powervm import vm
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -48,10 +45,6 @@ _LLA_SUBNET = "fe80::/64"
|
|||
|
||||
class ConfigDrivePowerVM(object):
|
||||
|
||||
_cur_vios_uuid = None
|
||||
_cur_vios_name = None
|
||||
_cur_vg_uuid = None
|
||||
|
||||
def __init__(self, adapter, host_uuid):
|
||||
"""Creates the config drive manager for PowerVM.
|
||||
|
||||
|
@ -61,13 +54,10 @@ class ConfigDrivePowerVM(object):
|
|||
self.adapter = adapter
|
||||
self.host_uuid = host_uuid
|
||||
|
||||
# The validate will use the cached static variables for the VIOS info.
|
||||
# Once validate is done, set the class variables to the updated cache.
|
||||
self._validate_vopt_vg()
|
||||
|
||||
self.vios_uuid = ConfigDrivePowerVM._cur_vios_uuid
|
||||
self.vios_name = ConfigDrivePowerVM._cur_vios_name
|
||||
self.vg_uuid = ConfigDrivePowerVM._cur_vg_uuid
|
||||
# Validate that the virtual optical exists
|
||||
self.vios_uuid, self.vg_uuid = tsk_vopt.validate_vopt_repo_exists(
|
||||
self.adapter, CONF.powervm.vopt_media_volume_group,
|
||||
CONF.powervm.vopt_media_rep_size)
|
||||
|
||||
def _create_cfg_dr_iso(self, instance, injected_files, network_info,
|
||||
admin_pass=None):
|
||||
|
@ -225,118 +215,6 @@ class ConfigDrivePowerVM(object):
|
|||
return tsk_stg.upload_vopt(self.adapter, self.vios_uuid, d_stream,
|
||||
file_name, file_size)
|
||||
|
||||
@lockutils.synchronized('validate_vopt')
|
||||
def _validate_vopt_vg(self):
|
||||
"""Will ensure that the virtual optical media repository exists.
|
||||
|
||||
This method will connect to one of the Virtual I/O Servers on the
|
||||
system and ensure that there is a root_vg that the optical media (which
|
||||
is temporary) exists.
|
||||
|
||||
If the volume group on an I/O Server goes down (perhaps due to
|
||||
maintenance), the system will rescan to determine if there is another
|
||||
I/O Server that can host the request.
|
||||
|
||||
The very first invocation may be expensive. It may also be expensive
|
||||
to call if a Virtual I/O Server unexpectantly goes down.
|
||||
|
||||
If there are no Virtual I/O Servers that can support the media, then
|
||||
an exception will be thrown.
|
||||
"""
|
||||
|
||||
# If our static variables were set, then we should validate that the
|
||||
# repo is still running. Otherwise, we need to reset the variables
|
||||
# (as it could be down for maintenance).
|
||||
if ConfigDrivePowerVM._cur_vg_uuid is not None:
|
||||
vio_uuid = ConfigDrivePowerVM._cur_vios_uuid
|
||||
vg_uuid = ConfigDrivePowerVM._cur_vg_uuid
|
||||
try:
|
||||
vg_wrap = pvm_stg.VG.get(self.adapter, uuid=vg_uuid,
|
||||
parent_type=pvm_vios.VIOS,
|
||||
parent_uuid=vio_uuid)
|
||||
if vg_wrap is not None and len(vg_wrap.vmedia_repos) != 0:
|
||||
return
|
||||
except Exception as exc:
|
||||
LOG.exception(exc)
|
||||
|
||||
LOG.info(_LI("An error occurred querying the virtual optical "
|
||||
"media repository. Attempting to re-establish "
|
||||
"connection with a virtual optical media repository"))
|
||||
|
||||
# If we're hitting this:
|
||||
# a) It's our first time booting up;
|
||||
# b) The previously-used Volume Group went offline (e.g. VIOS went down
|
||||
# for maintenance); OR
|
||||
# c) The previously-used media repository disappeared.
|
||||
#
|
||||
# Since it doesn't matter which VIOS we use for the media repo, we
|
||||
# should query all Virtual I/O Servers and see if an appropriate
|
||||
# media repository exists.
|
||||
vio_wraps = pvm_vios.VIOS.get(self.adapter)
|
||||
|
||||
# First loop through the VIOSes and their VGs to see if a media repos
|
||||
# already exists.
|
||||
found_vg = None
|
||||
found_vios = None
|
||||
|
||||
# And in case we don't find the media repos, keep track of the VG on
|
||||
# which we should create it.
|
||||
conf_vg = None
|
||||
conf_vios = None
|
||||
|
||||
for vio_wrap in vio_wraps:
|
||||
# If the RMC state is not active, skip over to ensure we don't
|
||||
# timeout
|
||||
if vio_wrap.rmc_state != pvm_bp.RMCState.ACTIVE:
|
||||
continue
|
||||
|
||||
try:
|
||||
vg_wraps = pvm_stg.VG.get(self.adapter, parent=vio_wrap)
|
||||
for vg_wrap in vg_wraps:
|
||||
if len(vg_wrap.vmedia_repos) != 0:
|
||||
found_vg = vg_wrap
|
||||
found_vios = vio_wrap
|
||||
break
|
||||
# In case no media repos exists, save a pointer to the
|
||||
# CONFigured vopt_media_volume_group if we find it.
|
||||
if (conf_vg is None and vg_wrap.name ==
|
||||
CONF.powervm.vopt_media_volume_group):
|
||||
conf_vg = vg_wrap
|
||||
conf_vios = vio_wrap
|
||||
|
||||
except Exception:
|
||||
LOG.warning(_LW('Unable to read volume groups for Virtual '
|
||||
'I/O Server %s'), vio_wrap.name)
|
||||
|
||||
# If we found it, don't keep looking
|
||||
if found_vg:
|
||||
break
|
||||
|
||||
# If we didn't find a media repos OR an appropriate volume group, raise
|
||||
# the exception. Since vopt_media_volume_group defaults to rootvg,
|
||||
# which is always present, this should only happen if:
|
||||
# a) No media repos exists on any VIOS we can see; AND
|
||||
# b) The user specified a non-rootvg vopt_media_volume_group; AND
|
||||
# c) The specified volume group did not exist on any VIOS.
|
||||
if found_vg is None and conf_vg is None:
|
||||
raise npvmex.NoMediaRepoVolumeGroupFound(
|
||||
vol_grp=CONF.powervm.vopt_media_volume_group)
|
||||
|
||||
# If no media repos was found, create it.
|
||||
if found_vg is None:
|
||||
found_vg = conf_vg
|
||||
found_vios = conf_vios
|
||||
vopt_repo = pvm_stg.VMediaRepos.bld(
|
||||
self.adapter, 'vopt', str(CONF.powervm.vopt_media_rep_size))
|
||||
found_vg.vmedia_repos = [vopt_repo]
|
||||
found_vg = found_vg.update()
|
||||
|
||||
# At this point, we know that we've successfully found or created the
|
||||
# volume group. Save to the static class variables.
|
||||
ConfigDrivePowerVM._cur_vg_uuid = found_vg.uuid
|
||||
ConfigDrivePowerVM._cur_vios_uuid = found_vios.uuid
|
||||
ConfigDrivePowerVM._cur_vios_name = found_vios.name
|
||||
|
||||
def dlt_vopt(self, lpar_uuid, stg_ftsk=None, remove_mappings=True):
|
||||
"""Deletes the virtual optical and scsi mappings for a VM.
|
||||
|
||||
|
|
Loading…
Reference in New Issue