Remove log translations
Log messages are no longer being translated. This removes all use of the
_LE, _LI, and _LW translation markers to simplify logging and to avoid
confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I6af522f01a2e8c2d071bc67d7e88b771a45e2ff6
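The change is mechanical throughout: drop the marker (and its extra closing
parenthesis) and re-align the continuation lines; the _() translator for
user-facing exception messages is kept. A minimal before/after sketch of the
pattern, condensed from the NVRAM manager hunk below:

    # Before: log message wrapped in a lazy-translation marker
    LOG.warning(_LW('Could not delete NVRAM: %s'), e, instance=instance)

    # After: the plain string goes straight to the logger; lazy
    # %-interpolation of the arguments is unchanged
    LOG.warning('Could not delete NVRAM: %s', e, instance=instance)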
@@ -31,7 +31,6 @@ import pypowervm.wrappers.virtual_io_server as pvm_vios
 
 from nova_powervm.virt.powervm import exception as npvmex
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import mgmt
 from nova_powervm.virt.powervm import vm
 
@@ -229,10 +228,10 @@ class DiskAdapter(object):
                 return stg_elem, vios
             except Exception as e:
                 msg_args['exc'] = e
-                LOG.warning(_LW("Failed to map boot disk %(disk_name)s of "
-                                "instance %(instance_name)s to the management "
-                                "partition from Virtual I/O Server "
-                                "%(vios_name)s: %(exc)s"), msg_args)
+                LOG.warning("Failed to map boot disk %(disk_name)s of "
+                            "instance %(instance_name)s to the management "
+                            "partition from Virtual I/O Server "
+                            "%(vios_name)s: %(exc)s", msg_args)
                 # Try the next hit, if available.
         # We either didn't find the boot dev, or failed all attempts to map it.
         raise npvmex.InstanceDiskMappingFailed(**msg_args)
@@ -17,7 +17,6 @@
 from nova.virt import imagecache
 
 from nova_powervm.virt.powervm.disk import driver
-from nova_powervm.virt.powervm.i18n import _LI
 
 from oslo_log import log as logging
 from pypowervm.tasks import storage as tsk_stg
@@ -80,7 +79,7 @@ class ImageManager(imagecache.ImageCacheManager):
         # Remove unused
         if unused:
             for image in unused:
-                LOG.info(_LI("Removing unused cache image: '%s'"), image.name)
+                LOG.info("Removing unused cache image: '%s'", image.name)
             tsk_stg.rm_vg_storage(base_dir, vdisks=unused)
 
     def update(self, context, all_instances):
@@ -35,8 +35,6 @@ from nova_powervm.virt.powervm.disk import driver as disk_dvr
 from nova_powervm.virt.powervm.disk import imagecache
 from nova_powervm.virt.powervm import exception as npvmex
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
 from nova_powervm.virt.powervm import vm
 
 
@@ -67,7 +65,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
         self.image_cache_mgr = imagecache.ImageManager(self._vios_uuid,
                                                        self.vg_uuid, adapter)
         self.cache_lock = lockutils.ReaderWriterLock()
-        LOG.info(_LI("Local Storage driver initialized: volume group: '%s'"),
+        LOG.info("Local Storage driver initialized: volume group: '%s'",
                  self.vg_name)
 
     @property
@@ -157,8 +155,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
 
         # Make sure the remove function will run within the transaction manager
         def rm_func(vios_w):
-            LOG.info(_LI("Disconnecting instance %(inst)s from storage "
-                         "disks."), {'inst': instance.name})
+            LOG.info("Disconnecting instance %(inst)s from storage "
+                     "disks.", {'inst': instance.name})
             return tsk_map.remove_maps(vios_w, lpar_uuid,
                                        match_func=match_func)
 
@@ -186,11 +184,10 @@ class LocalStorage(disk_dvr.DiskAdapter):
         """
         tsk_map.remove_vdisk_mapping(self.adapter, vios_uuid, self.mp_uuid,
                                      disk_names=[disk_name])
-        LOG.info(_LI(
-            "Unmapped boot disk %(disk_name)s from the management partition "
-            "from Virtual I/O Server %(vios_name)s."), {
-                'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
-                'vios_name': vios_uuid})
+        LOG.info("Unmapped boot disk %(disk_name)s from the management "
+                 "partition from Virtual I/O Server %(vios_name)s.",
+                 {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
+                  'vios_name': vios_uuid})
 
     def _create_disk_from_image(self, context, instance, image_meta,
                                 image_type=disk_dvr.DiskType.BOOT):
@@ -205,7 +202,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
         :param image_type: the image type. See disk constants above.
         :return: The backing pypowervm storage object that was created.
         """
-        LOG.info(_LI('Create disk.'), instance=instance)
+        LOG.info('Create disk.', instance=instance)
 
         # Disk size to API is in bytes. Input from flavor is in Gb
         disk_bytes = self._disk_gb_to_bytes(instance.flavor.root_gb,
@@ -272,8 +269,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
             self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])
 
         def add_func(vios_w):
-            LOG.info(_LI("Adding logical volume disk connection between VM "
-                         "%(vm)s and VIOS %(vios)s."),
-                     {'vm': instance.name, 'vios': vios_w.name})
+            LOG.info("Adding logical volume disk connection between VM "
+                     "%(vm)s and VIOS %(vios)s.",
+                     {'vm': instance.name, 'vios': vios_w.name})
             mapping = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, lpar_uuid, disk_info)
@@ -316,7 +313,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
                 break
 
         if not disk_found:
-            LOG.error(_LE('Disk %s not found during resize.'), vol_name,
+            LOG.error('Disk %s not found during resize.', vol_name,
                       instance=instance)
             raise nova_exc.DiskNotFound(
                 location=self.vg_name + '/' + vol_name)
@@ -330,7 +327,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
 
         # Get the disk name based on the instance and type
         vol_name = self._get_disk_name(disk_info['type'], instance, short=True)
-        LOG.info(_LI('Extending disk: %s'), vol_name)
+        LOG.info('Extending disk: %s', vol_name)
         try:
             _extend()
         except pvm_exc.Error:
@@ -21,8 +21,6 @@ from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm.disk import driver as disk_drv
 from nova_powervm.virt.powervm import exception as npvmex
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
 from nova_powervm.virt.powervm import vm
 
 from nova import image
@@ -71,9 +69,9 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         self.ssp_name = self._ssp.name
         self.tier_name = self._tier.name
 
-        LOG.info(_LI("SSP Storage driver initialized. "
-                     "Cluster '%(clust_name)s'; SSP '%(ssp_name)s'; "
-                     "Tier '%(tier_name)s"),
+        LOG.info("SSP Storage driver initialized. "
+                 "Cluster '%(clust_name)s'; SSP '%(ssp_name)s'; "
+                 "Tier '%(tier_name)s",
                  {'clust_name': self.clust_name, 'ssp_name': self.ssp_name,
                   'tier_name': self.tier_name})
 
@@ -142,8 +140,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
 
         # Delay run function to remove the mapping between the VM and the LU
         def rm_func(vios_w):
-            LOG.info(_LI("Removing SSP disk connection between VM %(vm)s and "
-                         "VIOS %(vios)s."),
-                     {'vm': instance.name, 'vios': vios_w.name})
+            LOG.info("Removing SSP disk connection between VM %(vm)s and "
+                     "VIOS %(vios)s.",
+                     {'vm': instance.name, 'vios': vios_w.name})
             return tsk_map.remove_maps(vios_w, lpar_uuid,
                                        match_func=match_func)
@@ -186,11 +184,10 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         """
         tsk_map.remove_lu_mapping(self.adapter, vios_uuid, self.mp_uuid,
                                   disk_names=[disk_name])
-        LOG.info(_LI(
-            "Unmapped boot disk %(disk_name)s from the management partition "
-            "from Virtual I/O Server %(vios_uuid)s."), {
-                'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
-                'vios_uuid': vios_uuid})
+        LOG.info("Unmapped boot disk %(disk_name)s from the management "
+                 "partition from Virtual I/O Server %(vios_uuid)s.",
+                 {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
+                  'vios_uuid': vios_uuid})
 
     def delete_disks(self, storage_elems):
         """Removes the disks specified by the mappings.
@@ -216,8 +213,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         :param image_type: The image type. See disk_drv.DiskType.
         :return: The backing pypowervm LU storage object that was created.
         """
-        LOG.info(_LI('SSP: Create %(image_type)s disk from image %(image_id)s '
-                     'for instance %(instance_uuid)s.'),
+        LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s '
+                 'for instance %(instance_uuid)s.',
                  dict(image_type=image_type, image_id=image_meta.id,
                       instance_uuid=instance.uuid))
 
@@ -228,7 +225,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
             image_meta.size, upload_type=tsk_stg.UploadType.IO_STREAM)
 
         boot_lu_name = self._get_disk_name(image_type, instance)
-        LOG.info(_LI('SSP: Disk name is %s'), boot_lu_name)
+        LOG.info('SSP: Disk name is %s', boot_lu_name)
 
         return tsk_stg.crt_lu(
             self._tier, boot_lu_name, instance.flavor.root_gb,
@@ -265,8 +262,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
 
         # This is the delay apply mapping
        def add_func(vios_w):
-            LOG.info(_LI("Adding SSP disk connection between VM %(vm)s and "
-                         "VIOS %(vios)s."),
-                     {'vm': instance.name, 'vios': vios_w.name})
+            LOG.info("Adding SSP disk connection between VM %(vm)s and "
+                     "VIOS %(vios)s.",
+                     {'vm': instance.name, 'vios': vios_w.name})
             mapping = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, lpar_uuid, lu)
@@ -311,9 +308,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
             ssp_uuid = data.get('ssp_uuid')
             if ssp_uuid is not None:
                 return ssp_uuid == self._cluster.ssp_uuid
-        except Exception as e:
-            LOG.exception(_LE(u'Error checking for shared storage. '
-                              'exception=%s'), e)
+        except Exception:
+            LOG.exception('Error checking for shared storage.')
         return False
 
     def check_instance_shared_storage_cleanup(self, context, data):
@@ -52,9 +52,6 @@ from nova_powervm.virt.powervm.disk import driver as disk_dvr
 from nova_powervm.virt.powervm import event
 from nova_powervm.virt.powervm import host as pvm_host
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import image as img
 from nova_powervm.virt.powervm import live_migration as lpm
 from nova_powervm.virt.powervm import media
@@ -123,7 +120,7 @@ class PowerVMDriver(driver.ComputeDriver):
         pvm_par.validate_vios_ready(self.adapter)
 
         # Do a scrub of the I/O plane to make sure the system is in good shape
-        LOG.info(_LI("Clearing stale I/O connections on driver init."))
+        LOG.info("Clearing stale I/O connections on driver init.")
         pvm_stor.ComprehensiveScrub(self.adapter).execute()
 
         # Initialize the disk adapter. Sets self.disk_dvr
@@ -144,7 +141,7 @@ class PowerVMDriver(driver.ComputeDriver):
         # Clean-up any orphan adapters
         self._cleanup_orphan_adapters(CONF.powervm.pvm_vswitch_for_novalink_io)
 
-        LOG.info(_LI("The compute driver has been initialized."))
+        LOG.info("The compute driver has been initialized.")
 
     def cleanup_host(self, host):
         """Clean up anything that is necessary for the driver gracefully stop.
@@ -157,7 +154,7 @@ class PowerVMDriver(driver.ComputeDriver):
         except Exception:
             pass
 
-        LOG.info(_LI("The compute driver has been shutdown."))
+        LOG.info("The compute driver has been shutdown.")
 
     def _get_adapter(self):
         # Build the adapter. May need to attempt the connection multiple times
@@ -214,13 +211,13 @@ class PowerVMDriver(driver.ComputeDriver):
                 _("Expected exactly one host; found %d"), len(syswraps))
         self.host_wrapper = syswraps[0]
         self.host_uuid = self.host_wrapper.uuid
-        LOG.info(_LI("Host UUID is:%s"), self.host_uuid)
+        LOG.info("Host UUID is:%s", self.host_uuid)
 
     @staticmethod
     def _log_operation(op, instance):
         """Log entry point of driver operations."""
-        LOG.info(_LI('Operation: %(op)s. Virtual machine display name: '
-                     '%(display_name)s, name: %(name)s'),
+        LOG.info('Operation: %(op)s. Virtual machine display name: '
+                 '%(display_name)s, name: %(name)s',
                  {'op': op, 'display_name': instance.display_name,
                   'name': instance.name},
                  instance=instance)
@@ -625,9 +622,9 @@ class PowerVMDriver(driver.ComputeDriver):
         def _rm_vscsi_maps(vwrap):
             removals = pvm_smap.remove_maps(vwrap, pvm_inst_uuid)
             if removals:
-                LOG.warning(_LW("Removing %(num_maps)d storage-less VSCSI "
-                                "mappings associated with LPAR ID "
-                                "%(lpar_uuid)s from VIOS %(vios_name)s."),
+                LOG.warning("Removing %(num_maps)d storage-less VSCSI "
+                            "mappings associated with LPAR ID "
+                            "%(lpar_uuid)s from VIOS %(vios_name)s.",
                             {'num_maps': len(removals),
                              'lpar_uuid': pvm_inst_uuid,
                              'vios_name': vwrap.name})
@@ -665,7 +662,7 @@ class PowerVMDriver(driver.ComputeDriver):
             pvm_inst_uuid = vm.get_pvm_uuid(instance)
             _setup_flow_and_run()
         except exception.InstanceNotFound:
-            LOG.warning(_LW('VM was not found during destroy operation.'),
+            LOG.warning('VM was not found during destroy operation.',
                         instance=instance)
             return
         except Exception as e:
@@ -693,7 +690,7 @@ class PowerVMDriver(driver.ComputeDriver):
         :param migrate_data: a LiveMigrateData object
         """
         if instance.task_state == task_states.RESIZE_REVERTING:
-            LOG.info(_LI('Destroy called for migrated/resized instance.'),
+            LOG.info('Destroy called for migrated/resized instance.',
                      instance=instance)
             # This destroy is part of resize or migrate. It's called to
             # revert the resize/migration on the destination host.
@@ -706,7 +703,7 @@ class PowerVMDriver(driver.ComputeDriver):
                                   qprop='PartitionName', log_errors=False)
             if vm_name == self._gen_resize_name(instance, same_host=True):
                 # Since it matches it must have been a resize, don't delete it!
-                LOG.info(_LI('Ignoring destroy call during resize revert.'),
+                LOG.info('Ignoring destroy call during resize revert.',
                          instance=instance)
                 return
 
@@ -763,8 +760,8 @@ class PowerVMDriver(driver.ComputeDriver):
         # host. If the migration failed, then the VM is probably not on
         # the destination host.
         if not vm.instance_exists(self.adapter, instance):
-            LOG.info(_LI('During volume detach, the instance was not found'
-                         ' on this host.'), instance=instance)
+            LOG.info('During volume detach, the instance was not found on '
+                     'this host.', instance=instance)
 
             # Check if there is live migration cleanup to do on this volume.
             mig = self.live_migrations.get(instance.uuid, None)
@@ -1060,8 +1057,8 @@ class PowerVMDriver(driver.ComputeDriver):
         # This code was pulled from the libvirt driver.
         ips = compute_utils.get_machine_ips()
         if CONF.my_ip not in ips:
-            LOG.warning(_LW('my_ip address (%(my_ip)s) was not found on '
-                            'any of the interfaces: %(ifaces)s'),
+            LOG.warning('my_ip address (%(my_ip)s) was not found on '
+                        'any of the interfaces: %(ifaces)s',
                         {'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
         return CONF.my_ip
 
@@ -1392,8 +1389,8 @@ class PowerVMDriver(driver.ComputeDriver):
 
         :returns: a dict containing migration info (hypervisor-dependent)
         """
-        LOG.info(_LI("Checking live migration capability on destination "
-                     "host."), instance=instance)
+        LOG.info("Checking live migration capability on destination host.",
+                 instance=instance)
 
         mig = lpm.LiveMigrationDest(self, instance)
         self.live_migrations[instance.uuid] = mig
@@ -1407,8 +1404,8 @@ class PowerVMDriver(driver.ComputeDriver):
         :param context: security context
         :param dest_check_data: result of check_can_live_migrate_destination
         """
-        LOG.info(_LI("Cleaning up from checking live migration capability "
-                     "on destination."))
+        LOG.info("Cleaning up from checking live migration capability "
+                 "on destination.")
 
     def check_can_live_migrate_source(self, context, instance,
                                       dest_check_data, block_device_info=None):
@@ -1423,7 +1420,7 @@ class PowerVMDriver(driver.ComputeDriver):
         :param block_device_info: result of _get_instance_block_device_info
         :returns: a dict containing migration info (hypervisor-dependent)
         """
-        LOG.info(_LI("Checking live migration capability on source host."),
+        LOG.info("Checking live migration capability on source host.",
                  instance=instance)
         mig = lpm.LiveMigrationSrc(self, instance, dest_check_data)
         self.live_migrations[instance.uuid] = mig
@@ -1445,8 +1442,7 @@ class PowerVMDriver(driver.ComputeDriver):
         :param disk_info: instance disk information
         :param migrate_data: a LiveMigrateData object
         """
-        LOG.info(_LI("Pre live migration processing."),
-                 instance=instance)
+        LOG.info("Pre live migration processing.", instance=instance)
         mig = self.live_migrations[instance.uuid]
 
         # Get a volume driver for each volume
@@ -1484,7 +1480,7 @@ class PowerVMDriver(driver.ComputeDriver):
             mig.live_migration(context, migrate_data)
         except pvm_exc.JobRequestTimedOut as timeout_ex:
             # If the migration operation exceeds configured timeout
-            LOG.error(_LE("Live migration timed out. Aborting migration"),
+            LOG.error("Live migration timed out. Aborting migration",
                       instance=instance)
             mig.migration_abort()
             self._migration_exception_util(context, instance, dest,
@@ -1523,7 +1519,7 @@ class PowerVMDriver(driver.ComputeDriver):
         :param ex: exception reason
 
         """
-        LOG.warning(_LW("Rolling back live migration."), instance=instance)
+        LOG.warning("Rolling back live migration.", instance=instance)
         try:
             mig.rollback_live_migration(context)
             recover_method(context, instance, dest, migrate_data=migrate_data)
@@ -1615,7 +1611,7 @@ class PowerVMDriver(driver.ComputeDriver):
         :param instance: instance object reference
         :param network_info: instance network information
         """
-        LOG.info(_LI("Post live migration processing on source host."),
+        LOG.info("Post live migration processing on source host.",
                  instance=instance)
         mig = self.live_migrations[instance.uuid]
         mig.post_live_migration_at_source(network_info)
@@ -1631,7 +1627,7 @@ class PowerVMDriver(driver.ComputeDriver):
         :param network_info: instance network information
         :param block_migration: if true, post operation of block_migration.
         """
-        LOG.info(_LI("Post live migration processing on destination host."),
+        LOG.info("Post live migration processing on destination host.",
                  instance=instance)
         mig = self.live_migrations[instance.uuid]
         mig.instance = instance
@@ -23,7 +23,6 @@ from pypowervm.tasks.monitor import util as pcm_util
 import subprocess
 
 from nova import conf as cfg
-from nova_powervm.virt.powervm.i18n import _LW
 
 
 LOG = logging.getLogger(__name__)
@@ -204,13 +203,13 @@ class HostCPUStats(pcm_util.MetricCache):
         # Should not happen, but just in case there is any precision loss from
         # CPU data back to system.
         if user_cycles_delta + fw_cycles_delta > tot_cycles_delta:
-            LOG.warning(_LW(
+            LOG.warning(
                 "Host CPU Metrics determined that the total cycles reported "
                 "was less than the used cycles. This indicates an issue with "
                 "the PCM data. Please investigate the results.\n"
                 "Total Delta Cycles: %(tot_cycles)d\n"
                 "User Delta Cycles: %(user_cycles)d\n"
-                "Firmware Delta Cycles: %(fw_cycles)d"),
+                "Firmware Delta Cycles: %(fw_cycles)d",
                 {'tot_cycles': tot_cycles_delta, 'fw_cycles': fw_cycles_delta,
                  'user_cycles': user_cycles_delta})
             tot_cycles_delta = user_cycles_delta + fw_cycles_delta
@@ -19,7 +19,3 @@ import oslo_i18n
 # Initialize message translators and short cut methods
 _translators = oslo_i18n.TranslatorFactory(domain='nova-powervm')
 _ = _translators.primary
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
@@ -31,8 +31,6 @@ from pypowervm import util
 
 from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
 from nova_powervm.virt.powervm import media
 from nova_powervm.virt.powervm import vif
 from nova_powervm.virt.powervm import vm
@@ -163,7 +161,7 @@ class LiveMigrationDest(LiveMigration):
 
         # For each volume, make sure it's ready to migrate
         for vol_drv in vol_drvs:
-            LOG.info(_LI('Performing pre migration for volume %(volume)s'),
+            LOG.info('Performing pre migration for volume %(volume)s',
                      dict(volume=vol_drv.volume_id), instance=self.instance)
             try:
                 vol_drv.pre_live_migration_on_destination(
@@ -201,7 +199,7 @@ class LiveMigrationDest(LiveMigration):
 
         # For each volume, make sure it completes the migration
         for vol_drv in vol_drvs:
-            LOG.info(_LI('Performing post migration for volume %(volume)s'),
+            LOG.info('Performing post migration for volume %(volume)s',
                      dict(volume=vol_drv.volume_id), instance=self.instance)
             try:
                 vol_drv.post_live_migration_at_destination(mig_vol_stor)
@@ -238,7 +236,7 @@ class LiveMigrationDest(LiveMigration):
 
         :param vol_drv: volume driver for the attached volume
         """
-        LOG.info(_LI('Performing detach for volume %(volume)s'),
+        LOG.info('Performing detach for volume %(volume)s',
                  dict(volume=vol_drv.volume_id), instance=self.instance)
         # Ensure the volume data is present before trying cleanup
         if hasattr(self, 'pre_live_vol_data'):
@@ -402,7 +400,7 @@ class LiveMigrationSrc(LiveMigration):
         """
         # For each volume, make sure the source is cleaned
         for vol_drv in vol_drvs:
-            LOG.info(_LI('Performing post migration for volume %(volume)s'),
+            LOG.info('Performing post migration for volume %(volume)s',
                      dict(volume=vol_drv.volume_id), instance=self.instance)
             try:
                 vol_drv.post_live_migration_at_source(migrate_data.vol_data)
@@ -442,7 +440,7 @@ class LiveMigrationSrc(LiveMigration):
             self.migration_recover()
 
         except Exception as ex:
-            LOG.error(_LE("Migration recover failed with error: %s"), ex,
+            LOG.error("Migration recover failed with error: %s", ex,
                       instance=self.instance)
         finally:
             LOG.debug("Finished migration rollback.", instance=self.instance)
@@ -35,7 +35,6 @@ from pypowervm.wrappers import storage as pvm_stg
 from pypowervm.wrappers import virtual_io_server as pvm_vios
 
 from nova_powervm import conf as cfg
-from nova_powervm.virt.powervm.i18n import _LI
 from nova_powervm.virt.powervm import vm
 
 LOG = logging.getLogger(__name__)
@@ -98,7 +97,7 @@ class ConfigDrivePowerVM(object):
         :return iso_path: The path to the ISO
         :return file_name: The file name for the ISO
         """
-        LOG.info(_LI("Creating config drive for instance: %s"), instance.name,
+        LOG.info("Creating config drive for instance: %s", instance.name,
                  instance=instance)
         extra_md = {}
         if admin_pass is not None:
@@ -122,8 +121,8 @@ class ConfigDrivePowerVM(object):
             max_len=pvm_const.MaxLen.VOPT_NAME)
         iso_path = os.path.join(im_path, file_name)
         with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
-            LOG.info(_LI("Config drive ISO being built for instance %(inst)s "
-                         "building to path %(iso_path)s."),
+            LOG.info("Config drive ISO being built for instance %(inst)s "
+                     "building to path %(iso_path)s.",
                      {'inst': instance.name, 'iso_path': iso_path},
                      instance=instance)
             # In case, if there's an OSError related failure while
@@ -211,8 +210,8 @@ class ConfigDrivePowerVM(object):
 
         # Define the function to build and add the mapping
         def add_func(vios_w):
-            LOG.info(_LI("Adding cfg drive mapping for instance %(inst)s for "
-                         "Virtual I/O Server %(vios)s"),
+            LOG.info("Adding cfg drive mapping for instance %(inst)s for "
+                     "Virtual I/O Server %(vios)s",
                      {'inst': instance.name, 'vios': vios_w.name},
                      instance=instance)
             mapping = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
@@ -341,7 +340,7 @@ class ConfigDrivePowerVM(object):
         media_elems = [x.backing_storage for x in media_mappings]
 
         def rm_vopt():
-            LOG.info(_LI("Removing virtual optical for VM with UUID %s."),
+            LOG.info("Removing virtual optical for VM with UUID %s.",
                      lpar_uuid)
             vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
                                      parent_type=pvm_vios.VIOS,
@@ -23,8 +23,6 @@ from pypowervm import exceptions as pvm_exc
 import six
 import time
 
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm.nvram import api
 from nova_powervm.virt.powervm import vm
 
@@ -106,8 +104,7 @@ class NvramManager(object):
         try:
             return self._api.fetch(instance)
         except Exception as e:
-            LOG.exception(_LE('Could not update NVRAM: %s'), e,
-                          instance=instance)
+            LOG.exception('Could not update NVRAM.', instance=instance)
             raise api.NVRAMDownloadException(instance=instance.name,
                                              reason=six.text_type(e))
 
@@ -124,7 +121,7 @@ class NvramManager(object):
             self._api.delete(instance)
         except Exception as e:
             # Delete exceptions should not end the operation
-            LOG.warning(_LW('Could not delete NVRAM: %s'), e,
+            LOG.warning('Could not delete NVRAM: %s', e,
                         instance=instance)
 
     @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
@@ -25,8 +25,6 @@ import types
 from nova_powervm import conf as cfg
 from nova_powervm.conf import powervm
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm.nvram import api
 
 from oslo_concurrency import lockutils
@@ -181,8 +179,8 @@ class SwiftNvramStore(api.NvramStore):
                     # If upload failed during nvram/slot_map update due to
                     # expired keystone token, retry swift-client operation
                     # to allow regeneration of token
-                    LOG.warning(_LW('NVRAM upload failed due to invalid '
-                                    'token. Retrying upload.'))
+                    LOG.warning('NVRAM upload failed due to invalid '
+                                'token. Retrying upload.')
                     return True
                 # The upload failed.
                 raise api.NVRAMUploadException(instance=inst_name,
@@ -218,7 +216,7 @@ class SwiftNvramStore(api.NvramStore):
                 data = data.encode('ascii')
             md5 = hashlib.md5(data).hexdigest()
             if existing_hash == md5:
-                LOG.info(_LI('NVRAM has not changed for instance: %s'),
+                LOG.info('NVRAM has not changed for instance: %s',
                          instance.name, instance=instance)
                 return
 
@@ -287,7 +285,7 @@ class SwiftNvramStore(api.NvramStore):
         try:
             os.remove(f.name)
         except Exception:
-            LOG.warning(_LW('Could not remove temporary file: %s'), f.name)
+            LOG.warning('Could not remove temporary file: %s', f.name)
 
     def delete_slot_map(self, inst_key):
         """Delete the Slot Map from Swift.
@@ -23,7 +23,6 @@ from pypowervm.tasks import slot_map
 from pypowervm.tasks import storage as pvm_tstor
 
 from nova_powervm.virt.powervm import exception as p_exc
-from nova_powervm.virt.powervm.i18n import _LW
 
 LOG = logging.getLogger(__name__)
 
@@ -177,9 +176,9 @@ class SwiftSlotManager(NovaSlotManager):
         try:
             self.store_api.delete_slot_map(key)
         except Exception:
-            LOG.warning(_LW("Unable to delete the slot map from Swift backing "
-                            "store with ID %(key)s. Will require "
-                            "manual cleanup."), {'key': key},
+            LOG.warning("Unable to delete the slot map from Swift backing "
+                        "store with ID %(key)s. Will require "
+                        "manual cleanup.", {'key': key},
                         instance=self.instance)
 
 
@@ -17,7 +17,6 @@
 from oslo_log import log as logging
 from taskflow import task
 
-from nova_powervm.virt.powervm.i18n import _LI
 from nova_powervm.virt.powervm import image
 
 LOG = logging.getLogger(__name__)
@@ -74,9 +73,9 @@ class StreamToGlance(task.Task):
     def execute(self, disk_path):
         metadata = image.snapshot_metadata(self.context, self.image_api,
                                            self.image_id, self.instance)
-        LOG.info(_LI("Starting stream of boot device for instance %(inst)s "
-                     "(local blockdev %(devpath)s) to glance image "
-                     "%(img_id)s."),
+        LOG.info("Starting stream of boot device for instance %(inst)s "
+                 "(local blockdev %(devpath)s) to glance image "
+                 "%(img_id)s.",
                  {'inst': self.instance.name, 'devpath': disk_path,
                   'img_id': self.image_id}, instance=self.instance)
         image.stream_blockdev_to_glance(self.context, self.image_api,
@@ -24,9 +24,6 @@ from pypowervm.wrappers import network as pvm_net
 from taskflow import task
 
 from nova_powervm import conf as cfg
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vif
 from nova_powervm.virt.powervm import vm
 
@@ -63,9 +60,9 @@ class UnplugVifs(task.Task):
         # error up front.
         modifiable, reason = lpar_wrap.can_modify_io()
         if not modifiable:
-            LOG.error(_LE('Unable to remove VIFs from instance %(inst)s '
-                          'because the system is not in a correct state. '
-                          'The reason reported by the system is: %(reason)s'),
+            LOG.error('Unable to remove VIFs from instance %(inst)s '
+                      'because the system is not in a correct state. '
+                      'The reason reported by the system is: %(reason)s',
                       {'inst': self.instance.name, 'reason': reason},
                       instance=self.instance)
             raise exception.VirtualInterfaceUnplugException(reason=reason)
@@ -152,10 +149,10 @@ class PlugVifs(task.Task):
         # Check to see if the LPAR is OK to add VIFs to.
         modifiable, reason = lpar_wrap.can_modify_io()
         if not modifiable and self.crt_network_infos:
-            LOG.error(_LE('Unable to create VIF(s) for instance %(sys)s. The '
-                          'VM was in a state where VIF plugging is not '
-                          'acceptable. The reason from the system is: '
-                          '%(reason)s'),
+            LOG.error('Unable to create VIF(s) for instance %(sys)s. The '
+                      'VM was in a state where VIF plugging is not '
+                      'acceptable. The reason from the system is: '
+                      '%(reason)s',
                       {'sys': self.instance.name, 'reason': reason},
                       instance=self.instance)
             raise exception.VirtualInterfaceCreateException()
@@ -167,8 +164,8 @@ class PlugVifs(task.Task):
         # See: https://bugs.launchpad.net/nova/+bug/1535918
         undo_host_change = False
         if self.instance.host != CONF.host:
-            LOG.warning(_LW('Instance was not assigned to this host. '
-                            'It was assigned to: %s'), self.instance.host,
+            LOG.warning('Instance was not assigned to this host. '
+                        'It was assigned to: %s', self.instance.host,
                         instance=self.instance)
             # Update the instance...
             old_host = self.instance.host
@@ -180,7 +177,7 @@ class PlugVifs(task.Task):
         # not wait for the neutron event as that likely won't be sent (it was
         # already done).
         for network_info in self.update_network_infos:
-            LOG.info(_LI("Updating VIF with mac %(mac)s for instance %(sys)s"),
+            LOG.info("Updating VIF with mac %(mac)s for instance %(sys)s",
                      {'mac': network_info['address'],
                       'sys': self.instance.name}, instance=self.instance)
             vif.plug(self.adapter, self.host_uuid, self.instance,
@@ -193,8 +190,8 @@ class PlugVifs(task.Task):
                     deadline=CONF.vif_plugging_timeout,
                     error_callback=self._vif_callback_failed):
                 for network_info in self.crt_network_infos:
-                    LOG.info(_LI('Creating VIF with mac %(mac)s for instance '
-                                 '%(sys)s'),
+                    LOG.info('Creating VIF with mac %(mac)s for instance '
+                             '%(sys)s',
                              {'mac': network_info['address'],
                               'sys': self.instance.name},
                              instance=self.instance)
@@ -205,13 +202,13 @@ class PlugVifs(task.Task):
                                              pvm_net.CNA):
                         self.cnas.append(new_vif)
         except eventlet.timeout.Timeout:
-            LOG.error(_LE('Error waiting for VIF to be created for instance '
-                          '%(sys)s'), {'sys': self.instance.name},
+            LOG.error('Error waiting for VIF to be created for instance '
+                      '%(sys)s', {'sys': self.instance.name},
                       instance=self.instance)
             raise exception.VirtualInterfaceCreateException()
         finally:
             if undo_host_change:
-                LOG.info(_LI('Undoing temporary host assignment to instance.'),
+                LOG.info('Undoing temporary host assignment to instance.',
                          instance=self.instance)
                 self.instance.host = old_host
                 self.instance.save()
@@ -219,8 +216,8 @@ class PlugVifs(task.Task):
         return self.cnas
 
     def _vif_callback_failed(self, event_name, instance):
-        LOG.error(_LE('VIF Plug failure for callback on event '
-                      '%(event)s for instance %(uuid)s'),
+        LOG.error('VIF Plug failure for callback on event '
+                  '%(event)s for instance %(uuid)s',
                   {'event': event_name, 'uuid': instance.uuid})
         if CONF.vif_plugging_is_fatal:
             raise exception.VirtualInterfaceCreateException()
@@ -250,9 +247,8 @@ class PlugVifs(task.Task):
 
         # The parameters have to match the execute method, plus the response +
         # failures even if only a subset are used.
-        LOG.warning(_LW('VIF creation being rolled back for instance '
-                        '%(inst)s'), {'inst': self.instance.name},
-                    instance=self.instance)
+        LOG.warning('VIF creation being rolled back for instance %(inst)s',
+                    {'inst': self.instance.name}, instance=self.instance)
 
         # Get the current adapters on the system
         cna_w_list = vm.get_cnas(self.adapter, self.instance)
@@ -302,14 +298,14 @@ class PlugMgmtVif(task.Task):
                         self.instance.name)
             return None
 
-        LOG.info(_LI('Plugging the Management Network Interface to instance '
-                     '%s'), self.instance.name, instance=self.instance)
+        LOG.info('Plugging the Management Network Interface to instance %s',
+                 self.instance.name, instance=self.instance)
         # Determine if we need to create the secure RMC VIF. This should only
         # be needed if there is not a VIF on the secure RMC vSwitch
         vswitch = vif.get_secure_rmc_vswitch(self.adapter, self.host_uuid)
         if vswitch is None:
-            LOG.warning(_LW('No management VIF created for instance %s due to '
-                            'lack of Management Virtual Switch'),
+            LOG.warning('No management VIF created for instance %s due to '
+                        'lack of Management Virtual Switch',
                         self.instance.name)
             return None
 
@@ -24,8 +24,6 @@ from taskflow.types import failure as task_fail
|
|||||||
|
|
||||||
from nova_powervm.virt.powervm.disk import driver as disk_driver
|
from nova_powervm.virt.powervm.disk import driver as disk_driver
|
||||||
from nova_powervm.virt.powervm import exception as npvmex
|
from nova_powervm.virt.powervm import exception as npvmex
|
||||||
from nova_powervm.virt.powervm.i18n import _LI
|
|
||||||
from nova_powervm.virt.powervm.i18n import _LW
|
|
||||||
from nova_powervm.virt.powervm import media
|
from nova_powervm.virt.powervm import media
|
||||||
from nova_powervm.virt.powervm import mgmt
|
from nova_powervm.virt.powervm import mgmt
|
||||||
|
|
||||||
@@ -51,15 +49,14 @@ class ConnectVolume(task.Task):
 super(ConnectVolume, self).__init__('connect_vol_%s' % self.vol_id)

 def execute(self):
-LOG.info(_LI('Connecting volume %(vol)s to instance %(inst)s'),
+LOG.info('Connecting volume %(vol)s to instance %(inst)s',
 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
 self.vol_drv.connect_volume(self.slot_mgr)

 def revert(self, result, flow_failures):
 # The parameters have to match the execute method, plus the response +
 # failures even if only a subset are used.
-LOG.warning(_LW('Volume %(vol)s for instance %(inst)s to be '
-'disconnected'),
+LOG.warning('Volume %(vol)s for instance %(inst)s to be disconnected',
 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})

 # Note that the rollback is *instant*. Resetting the FeedTask ensures
@@ -74,8 +71,8 @@ class ConnectVolume(task.Task):
 except npvmex.VolumeDetachFailed as e:
 # Only log that the volume detach failed. Should not be blocking
 # due to being in the revert flow.
-LOG.warning(_LW("Unable to disconnect volume for %(inst)s during "
-"rollback. Error was: %(error)s"),
+LOG.warning("Unable to disconnect volume for %(inst)s during "
+"rollback. Error was: %(error)s",
 {'inst': self.vol_drv.instance.name,
 'error': e.message})

@@ -100,15 +97,14 @@ class DisconnectVolume(task.Task):
 'disconnect_vol_%s' % self.vol_id)

 def execute(self):
-LOG.info(_LI('Disconnecting volume %(vol)s from instance %(inst)s'),
+LOG.info('Disconnecting volume %(vol)s from instance %(inst)s',
 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
 self.vol_drv.disconnect_volume(self.slot_mgr)

 def revert(self, result, flow_failures):
 # The parameters have to match the execute method, plus the response +
 # failures even if only a subset are used.
-LOG.warning(_LW('Volume %(vol)s for instance %(inst)s to be '
-'re-connected'),
+LOG.warning('Volume %(vol)s for instance %(inst)s to be re-connected',
 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})

 # Note that the rollback is *instant*. Resetting the FeedTask ensures
@@ -124,8 +120,8 @@ class DisconnectVolume(task.Task):
 except npvmex.VolumeAttachFailed as e:
 # Only log that the volume attach failed. Should not be blocking
 # due to being in the revert flow. See comment above.
-LOG.warning(_LW("Unable to re-connect volume for %(inst)s during "
-"rollback. Error was: %(error)s"),
+LOG.warning("Unable to re-connect volume for %(inst)s during "
+"rollback. Error was: %(error)s",
 {'inst': self.vol_drv.instance.name,
 'error': e.message})

@@ -262,8 +258,8 @@ class InstanceDiskToMgmt(task.Task):
 # partition from the same VIOS - it is safe to use the first one.
 the_map = new_maps[0]
 # Scan the SCSI bus, discover the disk, find its canonical path.
-LOG.info(_LI("Discovering device and path for mapping of %(dev_name)s "
-"on the management partition."),
+LOG.info("Discovering device and path for mapping of %(dev_name)s "
+"on the management partition.",
 {'dev_name': self.stg_elem.name})
 self.disk_path = mgmt.discover_vscsi_disk(the_map)
 return self.stg_elem, self.vios_wrap, self.disk_path
@@ -277,9 +273,9 @@ class InstanceDiskToMgmt(task.Task):
 if self.vios_wrap is None or self.stg_elem is None:
 # We never even got connected - nothing to do
 return
-LOG.warning(_LW("Unmapping boot disk %(disk_name)s of instance "
+LOG.warning("Unmapping boot disk %(disk_name)s of instance "
 "%(instance_name)s from management partition via "
-"Virtual I/O Server %(vios_name)s."),
+"Virtual I/O Server %(vios_name)s.",
 {'disk_name': self.stg_elem.name,
 'instance_name': self.instance.name,
 'vios_name': self.vios_wrap.name})
@@ -289,8 +285,8 @@ class InstanceDiskToMgmt(task.Task):
 if self.disk_path is None:
 # We did not discover the disk - nothing else to do.
 return
-LOG.warning(_LW("Removing disk %(disk_path)s from the management "
-"partition."), {'disk_path': self.disk_path})
+LOG.warning("Removing disk %(disk_path)s from the management "
+"partition.", {'disk_path': self.disk_path})
 mgmt.remove_block_dev(self.disk_path)


@@ -334,15 +330,15 @@ class RemoveInstanceDiskFromMgmt(task.Task):
 # stg_elem is None if boot disk was not mapped to management partition
 if stg_elem is None:
 return
-LOG.info(_LI("Unmapping boot disk %(disk_name)s of instance "
+LOG.info("Unmapping boot disk %(disk_name)s of instance "
 "%(instance_name)s from management partition via Virtual "
-"I/O Server %(vios_name)s."),
+"I/O Server %(vios_name)s.",
 {'disk_name': stg_elem.name,
 'instance_name': self.instance.name,
 'vios_name': vios_wrap.name})
 self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
-LOG.info(_LI("Removing disk %(disk_path)s from the management "
-"partition."), {'disk_path': disk_path})
+LOG.info("Removing disk %(disk_path)s from the management "
+"partition.", {'disk_path': disk_path})
 mgmt.remove_block_dev(disk_path)


@@ -402,8 +398,8 @@ class CreateAndConnectCfgDrive(task.Task):
 try:
 self.mb.dlt_vopt(lpar_wrap.uuid)
 except Exception as e:
-LOG.warning(_LW('Vopt removal as part of spawn reversion failed '
-'with: %(exc)s'), {'exc': six.text_type(e)},
+LOG.warning('Vopt removal as part of spawn reversion failed '
+'with: %(exc)s', {'exc': six.text_type(e)},
 instance=self.instance)


@@ -505,8 +501,8 @@ class SaveBDM(task.Task):
 super(SaveBDM, self).__init__('save_bdm_%s' % self.bdm.volume_id)

 def execute(self):
-LOG.info(_LI('Saving block device mapping for volume id %(vol_id)s '
-'on instance %(inst)s.'),
+LOG.info('Saving block device mapping for volume id %(vol_id)s '
+'on instance %(inst)s.',
 {'vol_id': self.bdm.volume_id, 'inst': self.instance.name})
 self.bdm.save()

@@ -535,7 +531,7 @@ class FindDisk(task.Task):
 def execute(self):
 disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
 if not disk:
-LOG.warning(_LW('Disk not found: %(disk_name)s'),
+LOG.warning('Disk not found: %(disk_name)s',
 {'disk_name':
 self.disk_dvr._get_disk_name(self.disk_type,
 self.instance),
@@ -562,6 +558,6 @@ class ExtendDisk(task.Task):
 super(ExtendDisk, self).__init__('extend_disk_%s' % disk_info['type'])

 def execute(self):
-LOG.info(_LI('Extending disk size of disk: %(disk)s size: %(size)s.'),
+LOG.info('Extending disk size of disk: %(disk)s size: %(size)s.',
 {'disk': self.disk_info['type'], 'size': self.size})
 self.disk_dvr.extend_disk(self.instance, self.disk_info, self.size)

@@ -18,13 +18,9 @@ from oslo_log import log as logging
 from pypowervm import const as pvm_const
 from pypowervm.tasks import partition as pvm_tpar
 from pypowervm.tasks import storage as pvm_stg
-import six
 from taskflow import task
 from taskflow.types import failure as task_fail

-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vm

 from nova.compute import task_states
@@ -102,7 +98,7 @@ class Create(task.Task):
 def execute(self):
 data = None
 if self.nvram_mgr is not None:
-LOG.info(_LI('Fetching NVRAM for instance %s.'),
+LOG.info('Fetching NVRAM for instance %s.',
 self.instance.name, instance=self.instance)
 data = self.nvram_mgr.fetch(self.instance)
 LOG.debug('NVRAM data is: %s', data, instance=self.instance)
@@ -118,8 +114,8 @@ class Create(task.Task):
 # build map earlier in the spawn, just before the LPAR is created.
 # Only rebuilds should be passing in None for stg_ftsk.
 if self.stg_ftsk.name == 'create_scrubber':
-LOG.info(_LI('Scrubbing storage for instance %s as part of '
-'rebuild.'), self.instance.name,
+LOG.info('Scrubbing storage for instance %s as part of '
+'rebuild.', self.instance.name,
 instance=self.instance)
 self.stg_ftsk.execute()

@@ -181,7 +177,7 @@ class Rename(task.Task):
 self.vm_name = name

 def execute(self):
-LOG.info(_LI('Renaming instance to name: %s'), self.name,
+LOG.info('Renaming instance to name: %s', self.name,
 instance=self.instance)
 return vm.rename(self.adapter, self.instance, self.vm_name)

@@ -206,7 +202,7 @@ class PowerOn(task.Task):
 vm.power_on(self.adapter, self.instance, opts=self.pwr_opts)

 def revert(self, result, flow_failures):
-LOG.warning(_LW('Powering off instance: %s'), self.instance.name)
+LOG.warning('Powering off instance: %s', self.instance.name)

 if isinstance(result, task_fail.Failure):
 # The power on itself failed...can't power off.
@@ -260,12 +256,9 @@ class StoreNvram(task.Task):

 try:
 self.nvram_mgr.store(self.instance, immediate=self.immediate)
-except Exception as e:
-LOG.exception(_LE('Unable to store NVRAM for instance '
-'%(name)s. Exception: %(reason)s'),
-{'name': self.instance.name,
-'reason': six.text_type(e)},
-instance=self.instance)
+except Exception:
+LOG.exception('Unable to store NVRAM for instance %(name)s.',
+{'name': self.instance.name}, instance=self.instance)


 class DeleteNvram(task.Task):
@@ -284,19 +277,16 @@ class DeleteNvram(task.Task):

 def execute(self):
 if self.nvram_mgr is None:
-LOG.info(_LI("No op for NVRAM delete."), instance=self.instance)
+LOG.info("No op for NVRAM delete.", instance=self.instance)
 return

-LOG.info(_LI('Deleting NVRAM for instance: %s'),
+LOG.info('Deleting NVRAM for instance: %s',
 self.instance.name, instance=self.instance)
 try:
 self.nvram_mgr.remove(self.instance)
-except Exception as e:
-LOG.exception(_LE('Unable to delete NVRAM for instance '
-'%(name)s. Exception: %(reason)s'),
-{'name': self.instance.name,
-'reason': six.text_type(e)},
-instance=self.instance)
+except Exception:
+LOG.exception('Unable to delete NVRAM for instance %(name)s.',
+{'name': self.instance.name}, instance=self.instance)


 class Delete(task.Task):

@@ -40,9 +40,6 @@ from pypowervm.wrappers import managed_system as pvm_ms
 from pypowervm.wrappers import network as pvm_net

 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vm

 LOG = log.getLogger(__name__)
@@ -114,8 +111,8 @@ def _push_vif_event(adapter, action, vif_w, instance, vif_type):
 event = pvm_evt.Event.bld(adapter, data, detail)
 try:
 event = event.create()
-LOG.debug(_LI('Pushed custom event for consumption by neutron agent: '
-'%s'), str(event), instance=instance)
+LOG.debug('Pushed custom event for consumption by neutron agent: %s',
+str(event), instance=instance)
 except Exception:
 with excutils.save_and_reraise_exception(logger=LOG):
 LOG.exception('Custom VIF event push failed. %s', str(event),
@@ -371,21 +368,21 @@ class PvmVifDriver(object):

 cna_w = self._find_cna_for_vif(cna_w_list, vif)
 if not cna_w:
-LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
+LOG.warning('Unable to unplug VIF with mac %(mac)s for '
 'instance %(inst)s. The VIF was not found on '
-'the instance.'),
+'the instance.',
 {'mac': vif['address'], 'inst': self.instance.name},
 instance=self.instance)
 return None

-LOG.info(_LI('Deleting VIF with mac %(mac)s for instance %(inst)s.'),
+LOG.info('Deleting VIF with mac %(mac)s for instance %(inst)s.',
 {'mac': vif['address'], 'inst': self.instance.name},
 instance=self.instance)
 try:
 cna_w.delete()
 except Exception as e:
-LOG.error(_LE('Unable to unplug VIF with mac %(mac)s for instance '
-'%(inst)s.'),
+LOG.error('Unable to unplug VIF with mac %(mac)s for instance '
+'%(inst)s.',
 {'mac': vif['address'], 'inst': self.instance.name},
 instance=self.instance)
 LOG.exception("PowerVM error during vif unplug.",
@@ -603,9 +600,9 @@ class PvmLBVifDriver(PvmLioVifDriver):
 # Find the CNA for this vif.
 cna_w = self._find_cna_for_vif(cna_w_list, vif)
 if not cna_w:
-LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
+LOG.warning('Unable to unplug VIF with mac %(mac)s for '
 'instance %(inst)s. The VIF was not found on '
-'the instance.'),
+'the instance.',
 {'mac': vif['address'], 'inst': self.instance.name},
 instance=self.instance)
 return None
@@ -619,8 +616,8 @@ class PvmLBVifDriver(PvmLioVifDriver):
 utils.execute('brctl', 'delif', vif['network']['bridge'],
 dev_name, run_as_root=True)
 except Exception as e:
-LOG.warning(_LW('Unable to delete device %(dev_name)s from bridge '
-'%(bridge)s. Error: %(error)s'),
+LOG.warning('Unable to delete device %(dev_name)s from bridge '
+'%(bridge)s. Error: %(error)s',
 {'dev_name': dev_name,
 'bridge': vif['network']['bridge'],
 'error': e.message}, instance=self.instance)
@@ -700,9 +697,9 @@ class PvmVnicSriovVifDriver(PvmVifDriver):
 vnic = vm.get_vnics(
 self.adapter, self.instance, mac=mac, one_result=True)
 if not vnic:
-LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
+LOG.warning('Unable to unplug VIF with mac %(mac)s for '
 'instance %(inst)s. No matching vNIC was found '
-'on the instance. VIF: %(vif)s'),
+'on the instance. VIF: %(vif)s',
 {'mac': mac, 'inst': self.instance.name, 'vif': vif},
 instance=self.instance)
 return None
@@ -773,9 +770,9 @@ class PvmOvsVifDriver(PvmLioVifDriver):
 # Find the CNA for this vif.
 cna_w = self._find_cna_for_vif(cna_w_list, vif)
 if not cna_w:
-LOG.warning(_LW('Unable to unplug VIF with mac %(mac)s for '
+LOG.warning('Unable to unplug VIF with mac %(mac)s for '
 'instance %(inst)s. The VIF was not found on '
-'the instance.'),
+'the instance.',
 {'mac': vif['address'], 'inst': self.instance.name},
 instance=self.instance)
 return None
@@ -825,8 +822,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):

 # Save this data for the migration command.
 vea_vlan_mappings[vif['address']] = cna_w.pvid
-LOG.info(_LI("VIF with mac %(mac)s is going on trunk %(dev)s with "
-"PVID %(pvid)s"),
+LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with "
+"PVID %(pvid)s",
 {'mac': vif['address'], 'dev': dev, 'pvid': cna_w.pvid},
 instance=self.instance)

@@ -844,9 +841,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):
 mac address, value is the destination's
 target hypervisor VLAN.
 """
-LOG.warning(_LW("Rolling back the live migrate of VIF with mac "
-"%(mac)s."), {'mac': vif['address']},
-instance=self.instance)
+LOG.warning("Rolling back the live migrate of VIF with mac %(mac)s.",
+{'mac': vif['address']}, instance=self.instance)

 # We know that we just attached the VIF to the NovaLink VM. Search
 # for a trunk adapter with the PVID and vSwitch that we specified
@@ -874,8 +870,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):

 if trunk:
 # Delete the peer'd trunk adapter.
-LOG.warning(_LW("Deleting target side trunk adapter %(dev)s for "
-"rollback operation"), {'dev': trunk.dev_name},
+LOG.warning("Deleting target side trunk adapter %(dev)s for "
+"rollback operation", {'dev': trunk.dev_name},
 instance=self.instance)
 trunk.delete()

@@ -21,8 +21,6 @@ from taskflow import task

 from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import exception as p_exc
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vm
 from nova_powervm.virt.powervm.volume import driver as v_driver
 from oslo_log import log as logging
@@ -87,7 +85,7 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
 # Check if volume is available in destination.
 vol_path = self._get_path()
 if not os.path.exists(vol_path):
-LOG.warning(_LW("File not found at path %s"), vol_path,
+LOG.warning("File not found at path %s", vol_path,
 instance=self.instance)
 raise p_exc.VolumePreMigrationFailed(
 volume_id=self.volume_id, instance_name=self.instance.name)
@@ -104,8 +102,8 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
 if vios_w.uuid not in self.vios_uuids:
 return None

-LOG.info(_LI("Adding logical volume disk connection between VM "
-"%(vm)s and VIOS %(vios)s."),
+LOG.info("Adding logical volume disk connection between VM "
+"%(vm)s and VIOS %(vios)s.",
 {'vm': self.instance.name, 'vios': vios_w.name},
 instance=self.instance)
 slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, path)
@@ -148,9 +146,8 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
 if vios_w.uuid not in self.vios_uuids:
 return None

-LOG.info(_LI("Disconnecting instance %(inst)s from storage "
-"disks."), {'inst': self.instance.name},
-instance=self.instance)
+LOG.info("Disconnecting instance %(inst)s from storage disks.",
+{'inst': self.instance.name}, instance=self.instance)
 removed_maps = tsk_map.remove_maps(vios_w, self.vm_uuid,
 match_func=match_func)
 for rm_map in removed_maps:

@@ -18,8 +18,6 @@ from oslo_log import log as logging

 from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import exception as p_exc
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vm
 from nova_powervm.virt.powervm.volume import driver as v_driver
 from nova_powervm.virt.powervm.volume import volume as volume
@@ -151,28 +149,28 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
 # If we have no device name, at this point
 # we should not continue. Subsequent scrub code on future
 # deploys will clean this up.
-LOG.warning(_LW(
+LOG.warning(
 "Disconnect Volume: The backing hdisk for volume "
 "%(volume_id)s on Virtual I/O Server %(vios)s is "
 "not in a valid state. No disconnect "
-"actions to be taken as volume is not healthy."),
+"actions to be taken as volume is not healthy.",
 {'volume_id': self.volume_id, 'vios': vios_w.name},
 instance=self.instance)
 return False

 except Exception as e:
-LOG.warning(_LW(
+LOG.warning(
 "Disconnect Volume: Failed to find disk on Virtual I/O "
 "Server %(vios_name)s for volume %(volume_id)s."
-" Error: %(error)s"),
+" Error: %(error)s",
 {'error': e, 'vios_name': vios_w.name,
 'volume_id': self.volume_id}, instance=self.instance)
 return False

 # We have found the device name
-LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
+LOG.info("Disconnect Volume: Discovered the device %(hdisk)s "
 "on Virtual I/O Server %(vios_name)s for volume "
-"%(volume_id)s."),
+"%(volume_id)s.",
 {'volume_id': self.volume_id,
 'vios_name': vios_w.name, 'hdisk': device_name},
 instance=self.instance)
@@ -204,10 +202,10 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
 # Warn if no hdisks disconnected.
 if not any([result['vio_modified']
 for result in ret['wrapper_task_rets'].values()]):
-LOG.warning(_LW(
+LOG.warning(
 "Disconnect Volume: Failed to disconnect the volume "
 "%(volume_id)s on ANY of the Virtual I/O Servers for "
-"instance %(inst)s."),
+"instance %(inst)s.",
 {'inst': self.instance.name, 'volume_id': self.volume_id},
 instance=self.instance)

@@ -29,9 +29,6 @@ from nova_powervm import conf as cfg
 from nova_powervm.conf import powervm as pvm_cfg
 from nova_powervm.virt.powervm import exception as exc
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LE
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm.volume import driver as v_driver

 LOG = logging.getLogger(__name__)
@@ -302,9 +299,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 return False

 # At this point, it should be correct.
-LOG.info(_LI("Instance %(inst)s has not yet defined a WWPN on "
+LOG.info("Instance %(inst)s has not yet defined a WWPN on "
 "fabric %(fabric)s. Appropriate WWPNs will be "
-"generated."),
+"generated.",
 {'inst': self.instance.name, 'fabric': fabric},
 instance=self.instance)
 return True
@@ -402,8 +399,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 # were already configured.
 for fabric in self._fabric_names():
 fc_state = self._get_fabric_state(fabric)
-LOG.info(_LI(
-"NPIV wwpns fabric state=%(st)s for instance %(inst)s"),
+LOG.info(
+"NPIV wwpns fabric state=%(st)s for instance %(inst)s",
 {'st': fc_state, 'inst': self.instance.name},
 instance=self.instance)

@@ -487,9 +484,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
 LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
 self._set_fabric_meta(fabric, npiv_port_maps)
-LOG.warning(_LW("Had to update the system metadata for the WWPNs "
-"due to incorrect physical WWPNs on fabric "
-"%(fabric)s"),
+LOG.warning("Had to update the system metadata for the WWPNs due to "
+"incorrect physical WWPNs on fabric %(fabric)s",
 {'fabric': fabric}, instance=self.instance)

 return npiv_port_maps
@@ -514,15 +510,15 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 for npiv_port_map in npiv_port_maps:
 vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)
 if vios_w is None:
-LOG.error(_LE("Mappings were not able to find a proper VIOS. "
-"The port mappings were %s."), npiv_port_maps,
+LOG.error("Mappings were not able to find a proper VIOS. "
+"The port mappings were %s.", npiv_port_maps,
 instance=self.instance)
 raise exc.VolumeAttachFailed(
 volume_id=volume_id, instance_name=self.instance.name,
 reason=_("Unable to find a Virtual I/O Server that "
 "hosts the NPIV port map for the server."))
-ls = [LOG.info, _LI("Adding NPIV mapping for instance %(inst)s "
-"for Virtual I/O Server %(vios)s."),
+ls = [LOG.info, "Adding NPIV mapping for instance %(inst)s "
+"for Virtual I/O Server %(vios)s.",
 {'inst': self.instance.name, 'vios': vios_w.name}]

 # Add the subtask to add the specific map.
@@ -575,8 +571,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 vios_wraps = self.stg_ftsk.feed

 for npiv_port_map in npiv_port_maps:
-ls = [LOG.info, _LI("Removing a NPIV mapping for instance "
-"%(inst)s for fabric %(fabric)s."),
+ls = [LOG.info, "Removing a NPIV mapping for instance "
+"%(inst)s for fabric %(fabric)s.",
 {'inst': self.instance.name, 'fabric': fabric}]
 vios_w = pvm_vfcm.find_vios_for_port_map(vios_wraps, npiv_port_map)

@@ -587,9 +583,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 pvm_vfcm.remove_maps, self.vm_uuid,
 port_map=npiv_port_map, logspec=ls)
 else:
-LOG.warning(_LW("No storage connections found between the "
+LOG.warning("No storage connections found between the "
 "Virtual I/O Servers and FC Fabric "
-"%(fabric)s."), {'fabric': fabric},
+"%(fabric)s.", {'fabric': fabric},
 instance=self.instance)

 def _set_fabric_state(self, fabric, state):
@@ -603,7 +599,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 FS_INST_MAPPED: Fabric is mapped with the nova instance.
 """
 meta_key = self._sys_fabric_state_key(fabric)
-LOG.info(_LI("Setting Fabric state=%(st)s for instance=%(inst)s"),
+LOG.info("Setting Fabric state=%(st)s for instance=%(inst)s",
 {'st': state, 'inst': self.instance.name},
 instance=self.instance)
 self.instance.system_metadata[meta_key] = state
@@ -655,8 +651,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
 meta_elems.append(p_wwpn)
 meta_elems.extend(v_wwpn.split())

-LOG.info(_LI("Fabric %(fabric)s wwpn metadata will be set to "
-"%(meta)s for instance %(inst)s"),
+LOG.info("Fabric %(fabric)s wwpn metadata will be set to "
+"%(meta)s for instance %(inst)s",
 {'fabric': fabric, 'meta': ",".join(meta_elems),
 'inst': self.instance.name},
 instance=self.instance)
@@ -700,10 +696,10 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):

 if self.instance.system_metadata.get(meta_key) is None:
 # If no mappings exist, log a warning.
-LOG.warning(_LW("No NPIV mappings exist for instance %(inst)s on "
+LOG.warning("No NPIV mappings exist for instance %(inst)s on "
 "fabric %(fabric)s. May not have connected to "
 "the fabric yet or fabric configuration was "
-"recently modified."),
+"recently modified.",
 {'inst': self.instance.name, 'fabric': fabric},
 instance=self.instance)
 return []

@@ -19,8 +19,6 @@ from taskflow import task
 from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import exception as p_exc
 from nova_powervm.virt.powervm.i18n import _
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vm

 from pypowervm import const as pvm_const
@@ -124,7 +122,6 @@ class VscsiVolumeAdapter(object):
 'found on %(vios_act)d Virtual I/O Servers.') %
 {'volume_id': self.volume_id, 'vios_act': num_vioses_found,
 'vios_req': CONF.powervm.vscsi_vios_connections_required})
-LOG.error(msg)
 ex_args = {'volume_id': self.volume_id, 'reason': msg,
 'instance_name': self.instance.name}
 raise p_exc.VolumeAttachFailed(**ex_args)
@@ -145,9 +142,9 @@ class VscsiVolumeAdapter(object):
 for a particular bus, or none of them.
 """
 def add_func(vios_w):
-LOG.info(_LI("Adding vSCSI mapping to Physical Volume %(dev)s "
-"to VM %(vm)s"), {'dev': device_name,
+LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s "
+"to VM %(vm)s", {'dev': device_name,
 'vm': self.vm_uuid})
 pv = pvm_stor.PV.bld(self.adapter, device_name, udid)
 v_map = tsk_map.build_vscsi_mapping(
 self.host_uuid, vios_w, self.vm_uuid, pv,
@@ -165,8 +162,8 @@ class VscsiVolumeAdapter(object):
 except (KeyError, ValueError):
 # It's common to lose our specific data in the BDM. The connection
 # information can be 'refreshed' by operations like LPM and resize
-LOG.info(_LI(u'Failed to retrieve device_id key from BDM for '
-'volume id %s'), self.volume_id)
+LOG.info('Failed to retrieve device_id key from BDM for volume id '
+'%s', self.volume_id)
 return None

 def _set_udid(self, udid):
@@ -186,8 +183,8 @@ class VscsiVolumeAdapter(object):
 except (KeyError, ValueError):
 # It's common to lose our specific data in the BDM. The connection
 # information can be 'refreshed' by operations like LPM and resize
-LOG.info(_LI(u'Failed to retrieve device_id key from BDM for '
-'volume id %s'), self.volume_id)
+LOG.info('Failed to retrieve device_id key from BDM for volume id '
+'%s', self.volume_id)
 return None

 def _set_devname(self, devname):
@@ -208,8 +205,8 @@ class VscsiVolumeAdapter(object):
 used when a volume is detached from the VM.
 """
 def rm_func(vios_w):
-LOG.info(_LI("Removing vSCSI mapping from Physical Volume %(dev)s "
-"to VM %(vm)s"), {'dev': device_name, 'vm': vm_uuid})
+LOG.info("Removing vSCSI mapping from Physical Volume %(dev)s "
+"to VM %(vm)s", {'dev': device_name, 'vm': vm_uuid})
 removed_maps = tsk_map.remove_maps(
 vios_w, vm_uuid,
 tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@@ -232,15 +229,15 @@ class VscsiVolumeAdapter(object):
 :param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
 """
 def rm_hdisk():
-LOG.info(_LI("Running remove for hdisk: '%s'"), device_name)
+LOG.info("Running remove for hdisk: '%s'", device_name)
 try:
 # Attempt to remove the hDisk
 hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
 vio_wrap.uuid)
 except Exception as e:
 # If there is a failure, log it, but don't stop the process
-LOG.warning(_LW("There was an error removing the hdisk "
-"%(disk)s from the Virtual I/O Server."),
+LOG.warning("There was an error removing the hdisk "
+"%(disk)s from the Virtual I/O Server.",
 {'disk': device_name})
 LOG.warning(e)

@@ -250,8 +247,8 @@ class VscsiVolumeAdapter(object):
 stg_ftsk = stg_ftsk or self.stg_ftsk
 stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
 else:
-LOG.info(_LI("hdisk %(disk)s is not removed because it has "
-"existing storage mappings"), {'disk': device_name})
+LOG.info("hdisk %(disk)s is not removed because it has "
+"existing storage mappings", {'disk': device_name})

 def _check_host_mappings(self, vios_wrap, device_name):
 """Checks if the given hdisk has multiple mappings
@@ -268,7 +265,7 @@ class VscsiVolumeAdapter(object):
 vios_scsi_mappings, None,
 tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))

-LOG.info(_LI("%(num)d Storage Mappings found for %(dev)s"),
+LOG.info("%(num)d Storage Mappings found for %(dev)s",
 {'num': len(mappings), 'dev': device_name})
 # the mapping is still present as the task feed removes
 # the mapping later
@@ -279,10 +276,10 @@ class VscsiVolumeAdapter(object):

 if not udid and not devname:
 LOG.warning(
-_LW('Could not remove hdisk for volume: %s'), self.volume_id)
+'Could not remove hdisk for volume: %s', self.volume_id)
 return

-LOG.info(_LI('Removing hdisk for udid: %s'), udid)
+LOG.info('Removing hdisk for udid: %s', udid)

 def find_hdisk_to_remove(vios_w):
 if devname is None:
@@ -291,7 +288,7 @@ class VscsiVolumeAdapter(object):
 device_name = devname
 if device_name is None:
 return
-LOG.info(_LI('Removing %(hdisk)s from VIOS %(vios)s'),
+LOG.info('Removing %(hdisk)s from VIOS %(vios)s',
 {'hdisk': device_name, 'vios': vios_w.name})
 self._add_remove_hdisk(vios_w, device_name,
 stg_ftsk=rmv_hdisk_ftsk)

@@ -19,8 +19,6 @@ from oslo_log import log as logging

 from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import exception as p_exc
-from nova_powervm.virt.powervm.i18n import _LI
-from nova_powervm.virt.powervm.i18n import _LW
 from nova_powervm.virt.powervm import vm
 from nova_powervm.virt.powervm.volume import driver as v_driver
 from nova_powervm.virt.powervm.volume import volume as volume
@@ -187,14 +185,14 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
 vios_w.uuid, itls)

 if hdisk.good_discovery(status, device_name):
-LOG.info(_LI('Discovered %(hdisk)s on vios %(vios)s for '
-'volume %(volume_id)s. Status code: %(status)s.'),
+LOG.info('Discovered %(hdisk)s on vios %(vios)s for '
+'volume %(volume_id)s. Status code: %(status)s.',
 {'hdisk': device_name, 'vios': vios_w.name,
 'volume_id': volume_id, 'status': str(status)},
 instance=self.instance)
 elif status == hdisk.LUAStatus.DEVICE_IN_USE:
-LOG.warning(_LW('Discovered device %(dev)s for volume %(volume)s '
-'on %(vios)s is in use. Error code: %(status)s.'),
+LOG.warning('Discovered device %(dev)s for volume %(volume)s '
+'on %(vios)s is in use. Error code: %(status)s.',
 {'dev': device_name, 'volume': volume_id,
 'vios': vios_w.name, 'status': str(status)},
 instance=self.instance)
@@ -274,28 +272,28 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
 # in the I/O Server. Subsequent scrub code on future
 # deploys will clean this up.
 if not hdisk.good_discovery(status, device_name):
-LOG.warning(_LW(
+LOG.warning(
 "Disconnect Volume: The backing hdisk for volume "
 "%(volume_id)s on Virtual I/O Server %(vios)s is "
 "not in a valid state. This may be the result of "
-"an evacuate."),
+"an evacuate.",
 {'volume_id': self.volume_id, 'vios': vios_w.name},
 instance=self.instance)
 return False

 except Exception as e:
-LOG.warning(_LW(
+LOG.warning(
 "Disconnect Volume: Failed to find disk on Virtual I/O "
 "Server %(vios_name)s for volume %(volume_id)s. Volume "
-"UDID: %(volume_uid)s. Error: %(error)s"),
+"UDID: %(volume_uid)s. Error: %(error)s",
 {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
 'volume_id': self.volume_id}, instance=self.instance)
 return False

 # We have found the device name
-LOG.info(_LI("Disconnect Volume: Discovered the device %(hdisk)s "
+LOG.info("Disconnect Volume: Discovered the device %(hdisk)s "
 "on Virtual I/O Server %(vios_name)s for volume "
-"%(volume_id)s. Volume UDID: %(volume_uid)s."),
+"%(volume_id)s. Volume UDID: %(volume_uid)s.",
 {'volume_uid': udid, 'volume_id': self.volume_id,
 'vios_name': vios_w.name, 'hdisk': device_name},
 instance=self.instance)
@@ -326,9 +324,9 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
 # Warn if no hdisks disconnected.
 if not any([result['vio_modified']
 for result in ret['wrapper_task_rets'].values()]):
-LOG.warning(_LW("Disconnect Volume: Failed to disconnect the "
+LOG.warning("Disconnect Volume: Failed to disconnect the "
 "volume %(volume_id)s on ANY of the Virtual "
-"I/O Servers for instance %(inst)s."),
+"I/O Servers for instance %(inst)s.",
 {'inst': self.instance.name,
 'volume_id': self.volume_id},
 instance=self.instance)