Accommodate pypowervm restructures

Refactor to accommodate the 'tersify' pypowervm change sets:

WrapperClass(entry_or_element) => WrapperClass.wrap(entry_or_element)
WrapperClass.load_from_response(resp) => WrapperClass.wrap(resp)
Various crt_* methods are gone, replaced by WrapperClass.bld(...).
Various wrapper classes are renamed.
Various constants are gone.
Accessing *._entry and *._element is no longer required or allowed.
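
In code, the migration pattern looks roughly like this (a minimal sketch
assembled from the hunks below; adapter, resp, file_name and the *_uuid
variables are placeholders):

    from pypowervm.wrappers import storage as pvm_stg
    from pypowervm.wrappers import virtual_io_server as pvm_vios

    # Old: vg = pvm_stg.VolumeGroup.load_from_response(resp)
    # New: one classmethod wraps a Response, Entry, or Element.
    vg = pvm_stg.VG.wrap(resp)

    # Old: elem = pvm_vios.crt_scsi_map_to_vopt(adapter, host_uuid,
    #                                           lpar_uuid, file_name)
    #      mapping = pvm_vios.VirtualSCSIMapping(elem)
    # New: bld_* classmethods build the wrapper directly.
    mapping = pvm_vios.VSCSIMapping.bld_to_vopt(adapter, host_uuid,
                                                lpar_uuid, file_name)

    # Old: adapter.update(vg._element, vg.etag, pvm_consts.VIOS, ...)
    # New: pass the wrapper itself; schema_type replaces the constants.
    adapter.update(vg, vg.etag, pvm_vios.VIOS.schema_type,
                   root_id=vios_uuid, child_type=pvm_stg.VG.schema_type,
                   child_id=vg.uuid)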

Change-Id: I0a694553046e269ffc6f637ed0f38374bed2e064
Eric Fried 2015-03-02 13:57:06 -06:00
parent 26d599ec5f
commit 72351aae82
12 changed files with 157 additions and 134 deletions

View File

@@ -20,9 +20,9 @@ from nova import exception as nova_exc
from nova import objects
from nova import test
import os
from pypowervm import adapter as adpt
from pypowervm.tests.wrappers.util import pvmhttp
from pypowervm.wrappers import virtual_io_server as vios_w
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.tests.virt import powervm
from nova_powervm.tests.virt.powervm import fixtures as fx
@@ -81,7 +81,7 @@ class TestLocalDisk(test.TestCase):
d_size=21474836480L)
self.assertEqual('fake_vol', vol_name.get('device_name'))
@mock.patch('pypowervm.wrappers.storage.VolumeGroup')
@mock.patch('pypowervm.wrappers.storage.VG')
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.'
'_get_vg_uuid')
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.'
@@ -90,11 +90,12 @@ class TestLocalDisk(test.TestCase):
"""Tests the capacity methods."""
# Set up the mock data. This will simulate our vg wrapper
wrap = mock.MagicMock(name='vg_wrapper')
type(wrap).capacity = mock.PropertyMock(return_value='5120')
type(wrap).available_size = mock.PropertyMock(return_value='2048')
mock_vg_wrap = mock.MagicMock(name='vg_wrapper')
type(mock_vg_wrap).capacity = mock.PropertyMock(return_value='5120')
type(mock_vg_wrap).available_size = mock.PropertyMock(
return_value='2048')
mock_vg.load_from_response.return_value = wrap
mock_vg.wrap.return_value = mock_vg_wrap
local = self.get_ls(self.apt)
self.assertEqual(5120.0, local.capacity)
@@ -114,8 +115,9 @@ class TestLocalDisk(test.TestCase):
def validate_update(*kargs, **kwargs):
# Make sure that the mappings are only 1 (the remaining vopt)
self.assertEqual([vios_w.XAG_VIOS_SCSI_MAPPING], kwargs['xag'])
vio = vios_w.VirtualIOServer(adpt.Entry({}, kargs[0]))
self.assertEqual([pvm_vios.XAGEnum.VIOS_SCSI_MAPPING],
kwargs['xag'])
vio = kargs[0]
self.assertEqual(1, len(vio.scsi_mappings))
self.apt.update.side_effect = validate_update
@@ -137,8 +139,9 @@ class TestLocalDisk(test.TestCase):
def validate_update(*kargs, **kwargs):
# No mappings will be removed since the names don't match
self.assertEqual([vios_w.XAG_VIOS_SCSI_MAPPING], kwargs['xag'])
vio = vios_w.VirtualIOServer(adpt.Entry({}, kargs[0]))
self.assertEqual([pvm_vios.XAGEnum.VIOS_SCSI_MAPPING],
kwargs['xag'])
vio = kargs[0]
self.assertEqual(2, len(vio.scsi_mappings))
self.apt.update.side_effect = validate_update
@@ -149,7 +152,9 @@ class TestLocalDisk(test.TestCase):
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.'
'_get_vg_uuid')
def test_delete_volumes(self, mock_vg_uuid):
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.'
'_get_vg_wrap')
def test_delete_volumes(self, mock_vg, mock_vg_uuid):
# Mocks
self.apt.side_effect = [self.vg_to_vio]
@@ -157,6 +162,9 @@ class TestLocalDisk(test.TestCase):
scsi_mapping = mock.MagicMock()
scsi_mapping.udid = '0300025d4a00007a000000014b36d9deaf.1'
vg = pvm_stg.VG._bld().entry
mock_vg.return_value = pvm_stg.VG.wrap(vg, etag='etag')
# Invoke the call
local = self.get_ls(self.apt)
local.delete_volumes(mock.MagicMock(), mock.MagicMock(),
@@ -165,7 +173,7 @@ class TestLocalDisk(test.TestCase):
# Validate the call
self.assertEqual(1, self.apt.update.call_count)
@mock.patch('pypowervm.wrappers.storage.VolumeGroup')
@mock.patch('pypowervm.wrappers.storage.VG')
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.'
'_get_vg_uuid')
@mock.patch('nova_powervm.virt.powervm.disk.localdisk.LocalStorage.'
@@ -180,7 +188,7 @@ class TestLocalDisk(test.TestCase):
resp = mock.Mock(name='response')
resp.virtual_disks = [vdisk]
mock_vg.load_from_response.return_value = resp
mock_vg.wrap.return_value = resp
mock_dsk_name.return_value = 'NOMATCH'
self.assertRaises(nova_exc.DiskNotFound, local.extend_volume,

View File

@@ -24,9 +24,10 @@ from nova import objects
from nova import test
from nova.tests.unit import fake_instance
from nova.virt import fake
import pypowervm.adapter as pvm_adp
from pypowervm.tests.wrappers.util import pvmhttp
from pypowervm.wrappers import constants as wpr_consts
import pypowervm.wrappers.managed_system as msentry_wrapper
import pypowervm.wrappers.logical_partition as pvm_lpar
import pypowervm.wrappers.managed_system as pvm_ms
from nova_powervm.tests.virt import powervm
from nova_powervm.tests.virt.powervm import fixtures as fx
@@ -48,15 +49,15 @@ class TestPowerVMDriver(test.TestCase):
"Could not load %s " %
MS_HTTPRESP_FILE)
entries = ms_http.response.feed.findentries(
wpr_consts.SYSTEM_NAME, MS_NAME)
entries = ms_http.response.feed.findentries(pvm_ms._SYSTEM_NAME,
MS_NAME)
self.assertNotEqual(entries, None,
"Could not find %s in %s" %
(MS_NAME, MS_HTTPRESP_FILE))
self.ms_entry = entries[0]
self.wrapper = msentry_wrapper.ManagedSystem(self.ms_entry)
self.wrapper = pvm_ms.System.wrap(self.ms_entry)
self.drv_fix = self.useFixture(fx.PowerVMComputeDriver())
self.drv = self.drv_fix.drv
@@ -111,6 +112,9 @@ class TestPowerVMDriver(test.TestCase):
my_flavor = inst.get_flavor()
mock_get_flv.return_value = my_flavor
mock_cfg_drv.return_value = False
resp = pvm_adp.Response('method', 'path', 'status', 'reason', {})
resp.entry = pvm_lpar.LPAR._bld().entry
mock_crt.return_value = resp
# Invoke the method.
self.drv.spawn('context', inst, mock.Mock(),
@@ -142,6 +146,9 @@ class TestPowerVMDriver(test.TestCase):
my_flavor = inst.get_flavor()
mock_get_flv.return_value = my_flavor
mock_cfg_drv.return_value = True
resp = pvm_adp.Response('method', 'path', 'status', 'reason', {})
resp.entry = pvm_lpar.LPAR._bld().entry
mock_crt.return_value = resp
# Invoke the method.
self.drv.spawn('context', inst, mock.Mock(),
@@ -170,6 +177,9 @@ class TestPowerVMDriver(test.TestCase):
my_flavor = inst.get_flavor()
mock_get_flv.return_value = my_flavor
mock_cfg_drv.return_value = False
resp = pvm_adp.Response('method', 'path', 'status', 'reason', {})
resp.entry = pvm_lpar.LPAR._bld().entry
mock_crt.return_value = resp
# Make sure power on fails.
mock_pwron.side_effect = exc.Forbidden()
@@ -223,12 +233,15 @@ class TestPowerVMDriver(test.TestCase):
@mock.patch('nova_powervm.virt.powervm.vm.power_off')
@mock.patch('nova_powervm.virt.powervm.vm.update')
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
def test_resize(
self, mock_get_flv, mock_update, mock_pwr_off, mock_get_uuid):
def test_resize(self, mock_get_flv, mock_update, mock_pwr_off,
mock_get_uuid):
"""Validates the PowerVM driver resize operation."""
# Set up the mocks to the resize operation.
inst = objects.Instance(**powervm.TEST_INSTANCE)
host = self.drv.get_host_ip_addr()
resp = pvm_adp.Response('method', 'path', 'status', 'reason', {})
resp.entry = pvm_lpar.LPAR._bld().entry
self.apt.read.return_value = resp
# Catch root disk resize smaller.
small_root = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=9)

View File

@@ -18,8 +18,7 @@
import logging
from nova import test
from pypowervm.tests.wrappers.util import pvmhttp
from pypowervm.wrappers import constants as wpr_consts
import pypowervm.wrappers.managed_system as msentry_wrapper
import pypowervm.wrappers.managed_system as pvm_ms
from nova_powervm.virt.powervm import host as pvm_host
@@ -39,15 +38,15 @@ class TestPowerVMHost(test.TestCase):
"Could not load %s " %
MS_HTTPRESP_FILE)
entries = ms_http.response.feed.findentries(
wpr_consts.SYSTEM_NAME, MS_NAME)
entries = ms_http.response.feed.findentries(pvm_ms._SYSTEM_NAME,
MS_NAME)
self.assertNotEqual(entries, None,
"Could not find %s in %s" %
(MS_NAME, MS_HTTPRESP_FILE))
self.ms_entry = entries[0]
self.wrapper = msentry_wrapper.ManagedSystem(self.ms_entry)
self.wrapper = pvm_ms.System.wrap(self.ms_entry)
def test_host_resources(self):
stats = pvm_host.build_host_resource_from_ms(self.wrapper)

View File

@@ -18,10 +18,8 @@ import mock
from nova import test
import os
from pypowervm import adapter as adpt
from pypowervm.tests.wrappers.util import pvmhttp
from pypowervm.wrappers import storage as st_w
from pypowervm.wrappers import virtual_io_server as vios_w
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.tests.virt.powervm import fixtures as fx
from nova_powervm.virt.powervm import media as m
@@ -82,7 +80,8 @@ class TestConfigDrivePowerVM(test.TestCase):
@mock.patch('os.remove')
@mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM.'
'_upload_lv')
@mock.patch('pypowervm.wrappers.virtual_io_server.crt_scsi_map_to_vopt')
@mock.patch('pypowervm.wrappers.virtual_io_server.VSCSIMapping.'
'bld_to_vopt')
def test_crt_cfg_drv_vopt(self, mock_vio_w, mock_upld, mock_rm,
mock_size, mock_validate, mock_cfg_iso):
# Mock Returns
@@ -128,14 +127,15 @@ class TestConfigDrivePowerVM(test.TestCase):
if kwargs.get('child_type') is not None:
# This is the VG update. Make sure there are no optical medias
# anymore.
vg = st_w.VolumeGroup(adpt.Entry({}, kargs[0]))
vg = kargs[0]
self.assertEqual(0, len(vg.vmedia_repos[0].optical_media))
elif kwargs.get('xag') is not None:
# This is the VIOS call. Make sure the xag is set and the
# mapping was removed. Originally 2, one for vopt and
# local disk. Should now be 1.
self.assertEqual([vios_w.XAG_VIOS_SCSI_MAPPING], kwargs['xag'])
vio = vios_w.VirtualIOServer(adpt.Entry({}, kargs[0]))
self.assertEqual([pvm_vios.XAGEnum.VIOS_SCSI_MAPPING],
kwargs['xag'])
vio = kargs[0]
self.assertEqual(1, len(vio.scsi_mappings))
else:
self.fail("Shouldn't hit here")

View File

@@ -57,8 +57,8 @@ class StorageAdapter(object):
:param lpar_uuid: The UUID for the pypowervm LPAR element.
:param disk_type: The list of disk types to remove or None which means
to remove all disks from the VM.
:return: A list of Mappings (either pypowervm VirtualSCSIMappings or
VirtualFCMappings)
:return: A list of Mappings (either pypowervm VSCSIMappings or
VFCMappings)
"""
pass
@@ -68,9 +68,8 @@ class StorageAdapter(object):
:param context: nova context for operation
:param instance: instance to delete the image for.
:param mappings: The mappings that had been used to identify the
backing storage. List of pypowervm
VirtualSCSIMappings or VirtualFCMappings.
Typically derived from disconnect_volume.
backing storage. List of pypowervm VSCSIMappings or
VFCMappings. Typically derived from disconnect_volume.
"""
pass

View File

@@ -27,8 +27,7 @@ from nova import image
from nova.i18n import _LI, _LE
from pypowervm import exceptions as pvm_exc
from pypowervm.jobs import upload_lv
from pypowervm.wrappers import constants as pvm_consts
from pypowervm.wrappers import storage as pvm_st
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.virt.powervm.disk import blockdev
@@ -133,8 +132,8 @@ class LocalStorage(blockdev.StorageAdapter):
removals.append(vdisk)
break
# We know that the mappings are VirtualSCSIMappings. Remove the
# storage that resides in the scsi map from the volume group
# We know that the mappings are VSCSIMappings. Remove the storage that
# resides in the scsi map from the volume group.
existing_vds = vg_wrap.virtual_disks
for removal in removals:
LOG.info(_LI('Deleting volume: %s') % removal.name,
@@ -142,20 +141,22 @@ class LocalStorage(blockdev.StorageAdapter):
existing_vds.remove(removal)
# Now update the volume group to remove the storage.
self.adapter.update(vg_wrap._element, vg_wrap.etag, pvm_vios.VIO_ROOT,
self.vios_uuid, child_type=pvm_st.VG_ROOT,
child_id=self.vg_uuid)
self.adapter.update(
vg_wrap, vg_wrap.etag, pvm_vios.VIOS.schema_type,
root_id=self.vios_uuid, child_type=pvm_stg.VG.schema_type,
child_id=self.vg_uuid)
def disconnect_volume(self, context, instance, lpar_uuid, disk_type=None):
# Quick read the VIOS, using specific extended attribute group
vios_resp = self.adapter.read(pvm_vios.VIO_ROOT, self.vios_uuid,
xag=[pvm_vios.XAG_VIOS_SCSI_MAPPING])
vios_w = pvm_vios.VirtualIOServer.load_from_response(vios_resp)
vios_resp = self.adapter.read(
pvm_vios.VIOS.schema_type, root_id=self.vios_uuid,
xag=[pvm_vios.XAGEnum.VIOS_SCSI_MAPPING])
vios_w = pvm_vios.VIOS.wrap(vios_resp)
# Find the existing mappings, and then pull them off the VIOS
existing_vios_mappings = vios_w.scsi_mappings
existing_maps = vios.get_vscsi_mappings(self.adapter, lpar_uuid,
vios_w, pvm_st.VirtualDisk)
vios_w, pvm_stg.VDisk)
# If disks were specified, only remove those.
if disk_type:
# Get the list of disk names
@@ -173,8 +174,9 @@ class LocalStorage(blockdev.StorageAdapter):
existing_vios_mappings.remove(scsi_map)
# Update the VIOS
self.adapter.update(vios_w._element, vios_w.etag, pvm_vios.VIO_ROOT,
vios_w.uuid, xag=[pvm_vios.XAG_VIOS_SCSI_MAPPING])
self.adapter.update(vios_w, vios_w.etag, pvm_vios.VIOS.schema_type,
root_id=vios_w.uuid,
xag=[pvm_vios.XAGEnum.VIOS_SCSI_MAPPING])
# Return the mappings that we just removed.
return disk_maps
@@ -245,10 +247,10 @@ class LocalStorage(blockdev.StorageAdapter):
disk_found.capacity = size
# Post it to the VIOS
self.adapter.update(vg_wrap._element, vg_wrap.etag,
pvm_consts.VIOS, root_id=self.vios_uuid,
child_type=pvm_consts.VOL_GROUP,
child_id=self.vg_uuid, xag=None)
self.adapter.update(
vg_wrap, vg_wrap.etag, pvm_vios.VIOS.schema_type,
root_id=self.vios_uuid, child_type=pvm_stg.VG.schema_type,
child_id=self.vg_uuid, xag=None)
# Get the volume name based on the instance and type
vol_name = self._get_disk_name(volume_info['type'], instance)
@@ -265,15 +267,14 @@ class LocalStorage(blockdev.StorageAdapter):
def _get_vg_uuid(self, adapter, vios_uuid, name):
try:
resp = adapter.read(pvm_consts.VIOS,
root_id=vios_uuid,
child_type=pvm_consts.VOL_GROUP)
resp = adapter.read(pvm_vios.VIOS.schema_type, root_id=vios_uuid,
child_type=pvm_stg.VG.schema_type)
except Exception as e:
LOG.exception(e)
raise e
# Search the feed for the volume group
vol_grps = pvm_st.VolumeGroup.load_from_response(resp)
vol_grps = pvm_stg.VG.wrap(resp)
for vol_grp in vol_grps:
LOG.info(_LI('Volume group: %s') % vol_grp.name)
if name == vol_grp.name:
@@ -282,10 +283,10 @@ class LocalStorage(blockdev.StorageAdapter):
raise VGNotFound(vg_name=name)
def _get_vg(self):
vg_rsp = self.adapter.read(pvm_vios.VIO_ROOT, root_id=self.vios_uuid,
child_type=pvm_st.VG_ROOT,
child_id=self.vg_uuid)
vg_rsp = self.adapter.read(
pvm_vios.VIOS.schema_type, root_id=self.vios_uuid,
child_type=pvm_stg.VG.schema_type, child_id=self.vg_uuid)
return vg_rsp
def _get_vg_wrap(self):
return pvm_st.VolumeGroup.load_from_response(self._get_vg())
return pvm_stg.VG.wrap(self._get_vg())

View File

@@ -33,8 +33,7 @@ from pypowervm import adapter as pvm_apt
from pypowervm.helpers import log_helper as log_hlp
from pypowervm import util as pvm_util
from pypowervm.utils import retry as pvm_retry
from pypowervm.wrappers import constants as pvm_consts
from pypowervm.wrappers import managed_system as msentry_wrapper
from pypowervm.wrappers import managed_system as pvm_ms
from nova_powervm.virt.powervm.disk import blockdev
from nova_powervm.virt.powervm.disk import localdisk as blk_lcl
@@ -95,9 +94,9 @@ class PowerVMDriver(driver.ComputeDriver):
def _get_host_uuid(self):
# Need to get a list of the hosts, then find the matching one
resp = self.adapter.read(pvm_consts.MGT_SYS)
resp = self.adapter.read(pvm_ms.System.schema_type)
mtms = CONF.pvm_host_mtms
self.host_wrapper = msentry_wrapper.find_entry_by_mtms(resp, mtms)
self.host_wrapper = pvm_ms.find_entry_by_mtms(resp, mtms)
if not self.host_wrapper:
raise Exception("Host %s not found" % CONF.pvm_host_mtms)
self.host_uuid = self.host_wrapper.uuid
@@ -392,9 +391,10 @@ class PowerVMDriver(driver.ComputeDriver):
:returns: Dictionary describing resources
"""
resp = self.adapter.read(pvm_consts.MGT_SYS, root_id=self.host_uuid)
resp = self.adapter.read(pvm_ms.System.schema_type,
root_id=self.host_uuid)
if resp:
self.host_wrapper = msentry_wrapper.ManagedSystem(resp.entry)
self.host_wrapper = pvm_ms.System.wrap(resp.entry)
# Get host information
data = pvm_host.build_host_resource_from_ms(self.host_wrapper)

View File

@@ -25,9 +25,8 @@ from oslo.config import cfg
from oslo_log import log as logging
from pypowervm.jobs import upload_lv
from pypowervm.wrappers import constants as pvmc
from pypowervm.wrappers import storage as pvm_st
from pypowervm.wrappers import virtual_io_server as vios_w
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.virt.powervm import vios
@@ -112,7 +111,7 @@ class ConfigDrivePowerVM(object):
:param lpar_uuid: The UUID of the client LPAR
:param admin_pass: Optional password to inject for the VM.
:returns: The VirtualSCSIMapping wrapper that can be added to the VIOS
:returns: The VSCSIMapping wrapper that can be added to the VIOS
to attach it to the VM.
"""
iso_path, file_name = self._create_cfg_dr_iso(instance, injected_files,
@@ -128,9 +127,8 @@ class ConfigDrivePowerVM(object):
# Now that it is uploaded, create the vSCSI mappings that link this to
# the VM. Don't run the upload as these are batched in a single call
# to the VIOS later.
elem = vios_w.crt_scsi_map_to_vopt(self.adapter, self.host_uuid,
lpar_uuid, file_name)
return vios_w.VirtualSCSIMapping(elem)
return pvm_vios.VSCSIMapping.bld_to_vopt(self.adapter, self.host_uuid,
lpar_uuid, file_name)
def _upload_lv(self, iso_path, file_name, file_size):
with open(iso_path, 'rb') as d_stream:
@@ -145,10 +143,11 @@ class ConfigDrivePowerVM(object):
:return vg_uuid: The Volume Group UUID holding the media repo.
"""
resp = self.adapter.read(pvmc.VIOS, self.vios_uuid, pvmc.VOL_GROUP)
resp = self.adapter.read(pvm_vios.VIOS.schema_type, self.vios_uuid,
pvm_stg.VG.schema_type)
found_vg = None
for vg_entry in resp.feed.entries:
vol_grp = pvm_st.VolumeGroup(vg_entry)
vol_grp = pvm_stg.VG.wrap(vg_entry)
if vol_grp.name == CONF.vopt_media_volume_group:
found_vg = vol_grp
break
@@ -166,12 +165,12 @@ class ConfigDrivePowerVM(object):
# Ensure that there is a virtual optical media repository within it.
if len(found_vg.vmedia_repos) == 0:
vopt_repo = pvm_st.crt_vmedia_repo('vopt',
str(CONF.vopt_media_rep_size))
found_vg.vmedia_repos = [pvm_st.VirtualMediaRepository(vopt_repo)]
self.adapter.update(found_vg._entry.element, resp.headers['etag'],
pvmc.VIOS, self.vios_uuid, pvmc.VOL_GROUP,
found_vg.uuid)
vopt_repo = pvm_stg.VMediaRepos.bld('vopt',
str(CONF.vopt_media_rep_size))
found_vg.vmedia_repos = [vopt_repo]
self.adapter.update(found_vg, resp.headers['etag'],
pvm_vios.VIOS.schema_type, self.vios_uuid,
pvm_stg.VG.schema_type, found_vg.uuid)
return found_vg.uuid
@@ -179,35 +178,38 @@ class ConfigDrivePowerVM(object):
"""Deletes the virtual optical and scsi mappings for a VM."""
# Read the SCSI mappings from the VIOS.
vio_rsp = self.adapter.read(vios_w.VIO_ROOT, root_id=self.vios_uuid,
xag=[vios_w.XAG_VIOS_SCSI_MAPPING])
vio = vios_w.VirtualIOServer.load_from_response(vio_rsp)
vio_rsp = self.adapter.read(
pvm_vios.VIOS.schema_type, root_id=self.vios_uuid,
xag=[pvm_vios.XAGEnum.VIOS_SCSI_MAPPING])
vio = pvm_vios.VIOS.wrap(vio_rsp)
# Get the mappings to this VM
existing_maps = vios.get_vscsi_mappings(self.adapter, lpar_uuid, vio,
pvm_st.VirtualOpticalMedia)
pvm_stg.VOptMedia)
for scsi_map in existing_maps:
vio.scsi_mappings.remove(scsi_map)
# Remove the mappings
self.adapter.update(vio._element, vio.etag, vios_w.VIO_ROOT,
self.adapter.update(vio, vio.etag, pvm_vios.VIOS.schema_type,
root_id=vio.uuid,
xag=[vios_w.XAG_VIOS_SCSI_MAPPING])
xag=[pvm_vios.XAGEnum.VIOS_SCSI_MAPPING])
# Next delete the media from the volume group...
# The mappings above have the backing storage. Just need to load
# the volume group (there is a new etag after the VIOS update)
# and find the matching ones.
vg_rsp = self.adapter.read(vios_w.VIO_ROOT, root_id=self.vios_uuid,
child_type=pvm_st.VG_ROOT,
vg_rsp = self.adapter.read(pvm_vios.VIOS.schema_type,
root_id=self.vios_uuid,
child_type=pvm_stg.VG.schema_type,
child_id=self.vg_uuid)
volgrp = pvm_st.VolumeGroup.load_from_response(vg_rsp)
volgrp = pvm_stg.VG.wrap(vg_rsp)
optical_medias = volgrp.vmedia_repos[0].optical_media
for scsi_map in existing_maps:
optical_medias.remove(scsi_map.backing_storage)
# Now we can do an update...and be done with it.
self.adapter.update(volgrp._element, volgrp.etag, vios_w.VIO_ROOT,
root_id=self.vios_uuid, child_type=pvm_st.VG_ROOT,
child_id=self.vg_uuid)
self.adapter.update(
volgrp, volgrp.etag, pvm_vios.VIOS.schema_type,
root_id=self.vios_uuid, child_type=pvm_stg.VG.schema_type,
child_id=self.vg_uuid)

View File

@@ -170,7 +170,7 @@ class ConnectCfgDrive(task.Task):
LOG.info(_LI('Attaching Config Drive to instance: %s') %
self.instance.name)
vios.add_vscsi_mapping(self.adapter, self.vios_uuid, self.vios_name,
cfg_drv_vscsi_map._element)
cfg_drv_vscsi_map.element)
class DeleteVOpt(task.Task):
@@ -208,8 +208,7 @@ class Detach(task.Task):
"""Creates the Task to detach the storage adapters.
Provides the stor_adpt_mappings. A list of pypowervm
VirtualSCSIMappings or VirtualFCMappings (depending on the storage
adapter).
VSCSIMappings or VFCMappings (depending on the storage adapter).
:param block_dvr: The StorageAdapter for the VM.
:param context: The nova context.

View File

@@ -75,7 +75,7 @@ class Create(task.Task):
LOG.info(_LI('Creating instance: %s') % self.instance.name)
resp = vm.crt_lpar(self.adapter, self.host_uuid, self.instance,
self.flavor)
return pvm_lpar.LogicalPartition.load_from_response(resp)
return pvm_lpar.LPAR.wrap(resp)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +

View File

@@ -22,7 +22,6 @@ import six
from nova.i18n import _LE
from pypowervm import exceptions as pvm_exc
from pypowervm.wrappers import constants as pvm_consts
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import virtual_io_server as pvm_vios
@@ -53,8 +52,7 @@ class VIOSNotFound(AbstractVIOSException):
def get_vios_uuid(adapter, name):
searchstring = "(PartitionName=='%s')" % name
try:
resp = adapter.read(pvm_consts.VIOS,
suffix_type='search',
resp = adapter.read(pvm_vios.VIOS.schema_type, suffix_type='search',
suffix_parm=searchstring)
except pvm_exc.Error as e:
if e.response.status == 404:
@@ -73,7 +71,7 @@ def get_vios_uuid(adapter, name):
def get_vios_entry(adapter, vios_uuid, vios_name):
try:
resp = adapter.read(pvm_consts.VIOS, root_id=vios_uuid)
resp = adapter.read(pvm_vios.VIOS.schema_type, root_id=vios_uuid)
except pvm_exc.Error as e:
if e.response.status == 404:
raise VIOSNotFound(vios_name=vios_name)
@@ -88,7 +86,7 @@ def get_vios_entry(adapter, vios_uuid, vios_name):
def get_vscsi_mappings(adapter, lpar_uuid, vio_wrapper, mapping_type):
"""Returns a list of VirtualSCSIMaps that pair to the instance.
"""Returns a list of VSCSIMappings that pair to the instance.
:param adapter: The pypowervm API Adapter.
:param lpar_uuid: The lpar UUID that identifies which system to get the
@@ -96,12 +94,12 @@ def get_vscsi_mappings(adapter, lpar_uuid, vio_wrapper, mapping_type):
:param vio_wrapper: The VIOS pypowervm wrapper for the VIOS. Should have
the mappings within it.
:param mapping_type: The type of mapping to look for. Typically
VirtualOpticalMedia or VirtualDisk
VOptMedia or VDisk
:returns: A list of vSCSI Mappings (pypowervm wrapper) from the VIOS
that are tied to the lpar, for the mapping type.
"""
# Quick read of the partition ID. Identifier between LPAR and VIOS
partition_id = adapter.read(pvm_lpar.LPAR_ROOT, root_id=lpar_uuid,
partition_id = adapter.read(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid,
suffix_type='quick',
suffix_parm='PartitionID').body
@@ -131,9 +129,9 @@ def add_vscsi_mapping(adapter, vios_uuid, vios_name, scsi_map):
# Get the VIOS Entry
vios_entry, etag = get_vios_entry(adapter, vios_uuid, vios_name)
# Wrap the entry
vios_wrap = pvm_vios.VirtualIOServer(vios_entry)
vios_wrap = pvm_vios.VIOS.wrap(vios_entry)
# Add the new mapping to the end
vios_wrap.scsi_mappings.append(pvm_vios.VirtualSCSIMapping(scsi_map))
vios_wrap.scsi_mappings.append(pvm_vios.VSCSIMapping.wrap(scsi_map))
# Write it back to the VIOS
adapter.update(vios_wrap._entry.element, etag,
pvm_consts.VIOS, vios_uuid, xag=None)
adapter.update(vios_wrap, etag, pvm_vios.VIOS.schema_type, vios_uuid,
xag=None)

View File

@@ -25,8 +25,8 @@ from pypowervm import exceptions as pvm_exc
from pypowervm.jobs import cna
from pypowervm.jobs import power
from pypowervm.jobs import vterm
from pypowervm.wrappers import constants as pvm_consts
from pypowervm.wrappers import logical_partition as pvm_lpar
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import network as pvm_net
import six
@@ -104,8 +104,9 @@ class InstanceInfo(hardware.InstanceInfo):
def _get_property(self, q_prop):
try:
resp = self._adapter.read(pvm_consts.LPAR, root_id=self._uuid,
suffix_type='quick', suffix_parm=q_prop)
resp = self._adapter.read(
pvm_lpar.LPAR.schema_type, root_id=self._uuid,
suffix_type='quick', suffix_parm=q_prop)
except pvm_exc.Error as e:
if e.response.status == 404:
raise exception.InstanceNotFound(instance_id=self._name)
@@ -167,9 +168,8 @@ def get_lpar_feed(adapter, host_uuid):
feed = None
try:
resp = adapter.read(pvm_consts.MGT_SYS,
root_id=host_uuid,
child_type=pvm_consts.LPAR)
resp = adapter.read(pvm_ms.System.schema_type, root_id=host_uuid,
child_type=pvm_lpar.LPAR.schema_type)
feed = resp.feed
except pvm_exc.Error as e:
LOG.exception(e)
@@ -182,14 +182,14 @@ def get_lpar_list(adapter, host_uuid):
feed = get_lpar_feed(adapter, host_uuid)
if feed is not None:
for entry in feed.entries:
name = pvm_lpar.LogicalPartition(entry).name
name = pvm_lpar.LPAR.wrap(entry).name
lpar_list.append(name)
return lpar_list
def get_instance_wrapper(adapter, instance, host_uuid):
"""Get the LogicalPartition wrapper for a given Nova instance.
"""Get the LPAR wrapper for a given Nova instance.
:param adapter: The adapter for the pypowervm API
:param instance: The nova instance.
@@ -197,9 +197,10 @@ def get_instance_wrapper(adapter, instance, host_uuid):
:returns: The pypowervm logical_partition wrapper.
"""
pvm_inst_uuid = get_pvm_uuid(instance)
resp = adapter.read(pvm_consts.MGT_SYS, root_id=host_uuid,
child_type=pvm_consts.LPAR, child_id=pvm_inst_uuid)
return pvm_lpar.LogicalPartition.load_from_response(resp)
resp = adapter.read(pvm_ms.System.schema_type, root_id=host_uuid,
child_type=pvm_lpar.LPAR.schema_type,
child_id=pvm_inst_uuid)
return pvm_lpar.LPAR.wrap(resp)
def calc_proc_units(vcpu):
@@ -230,15 +231,16 @@ def crt_lpar(adapter, host_uuid, instance, flavor):
sprocs = pvm_lpar.crt_shared_procs(proc_units, vcpus,
uncapped_weight=proc_weight)
lpar_elem = pvm_lpar.crt_lpar(instance.name,
pvm_lpar.LPAR_TYPE_AIXLINUX,
pvm_lpar.LPARTypeEnum.AIXLINUX,
sprocs,
mem,
min_mem=mem,
max_mem=mem,
max_io_slots='64')
return adapter.create(lpar_elem, pvm_consts.MGT_SYS,
root_id=host_uuid, child_type=pvm_lpar.LPAR)
return adapter.create(
lpar_elem, pvm_ms.System.schema_type, root_id=host_uuid,
child_type=pvm_lpar.LPAR.schema_type)
def update(adapter, host_uuid, instance, flavor, entry=None):
@@ -274,8 +276,9 @@ def update(adapter, host_uuid, instance, flavor, entry=None):
# Proc weight
entry.uncapped_weight = str(proc_weight)
# Write out the new specs
adapter.update(entry._element, entry.etag, pvm_consts.MGT_SYS,
root_id=host_uuid, child_type=pvm_lpar.LPAR, child_id=uuid)
adapter.update(entry.element, entry.etag, pvm_ms.System.schema_type,
root_id=host_uuid, child_type=pvm_lpar.LPAR.schema_type,
child_id=uuid)
def dlt_lpar(adapter, lpar_uuid):
@ -288,7 +291,7 @@ def dlt_lpar(adapter, lpar_uuid):
# we will close the vterm and try the delete again
try:
LOG.info(_LI('Deleting virtual machine. LPARID: %s') % lpar_uuid)
resp = adapter.delete(pvm_consts.LPAR, root_id=lpar_uuid)
resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
LOG.info(_LI('Virtual machine delete status: %s') % resp.status)
return resp
except pvm_exc.Error as e:
@@ -299,7 +302,8 @@ def dlt_lpar(adapter, lpar_uuid):
LOG.info(_LI('Closing virtual terminal'))
vterm.close_vterm(adapter, lpar_uuid)
# Try to delete the vm again
resp = adapter.delete(pvm_consts.LPAR, root_id=lpar_uuid)
resp = adapter.delete(pvm_lpar.LPAR.schema_type,
root_id=lpar_uuid)
LOG.info(_LI('Virtual machine delete status: %s')
% resp.status)
return resp
@@ -361,13 +365,13 @@ def get_cnas(adapter, instance, host_uuid):
:param adapter: The pypowervm adapter.
:param instance: The nova instance.
:param host_uuid: The host system UUID.
:returns The ClientNetworkAdapter wrappers that represent the CNAs on the
:returns The CNA wrappers that represent the ClientNetworkAdapters on the
VM.
"""
cna_resp = adapter.read(pvm_lpar.LPAR_ROOT,
cna_resp = adapter.read(pvm_lpar.LPAR.schema_type,
root_id=get_pvm_uuid(instance),
child_type=pvm_net.VADPT_ROOT)
return pvm_net.CNA.load_from_response(cna_resp)
child_type=pvm_net.CNA.schema_type)
return pvm_net.CNA.wrap(cna_resp)
def crt_vif(adapter, instance, host_uuid, vif):
@@ -415,7 +419,7 @@ class UUIDCache(object):
# Try to look it up
searchstring = "(PartitionName=='%s')" % name
try:
resp = self._adapter.read(pvm_consts.LPAR,
resp = self._adapter.read(pvm_lpar.LPAR.schema_type,
suffix_type='search',
suffix_parm=searchstring)
except pvm_exc.Error as e: