Merge "Clean up log messages"

Jenkins 2017-06-28 20:47:57 +00:00 committed by Gerrit Code Review
commit 560953d1c8
27 changed files with 301 additions and 347 deletions
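
The hunks below repeat two cleanups. First, log calls gain the instance= keyword that oslo.log's logger adapter accepts, so each record is automatically prefixed with the instance identifier and instance.name no longer needs to be interpolated into every format string. Second, "except ... as e" blocks that logged the caught error by hand are switched to LOG.exception, which records the active traceback on its own. A minimal sketch of both patterns, assuming only oslo.log (map_boot_disk and add_mapping are hypothetical helpers, not part of this commit):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def add_mapping(instance, vios):
        """Hypothetical stand-in for a pypowervm mapping call."""
        raise RuntimeError('mapping failed')


    def map_boot_disk(instance, vios):
        try:
            add_mapping(instance, vios)
        except Exception:
            # LOG.exception logs at ERROR and appends the active traceback,
            # so there is no need to catch the exception "as e" and
            # interpolate %(exc)s by hand.  The instance= keyword lets
            # oslo.log prefix the record with the instance identifier.
            LOG.exception("Failed to map boot disk from Virtual I/O Server "
                          "%(vios_name)s.", {'vios_name': vios.name},
                          instance=instance)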

View File

@@ -1,5 +1,5 @@
 # Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -204,12 +204,10 @@ class DiskAdapter(object):
             made.
         :raise InstanceDiskMappingFailed: If the mapping could not be done.
         """
-        msg_args = {'instance_name': instance.name}
         lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
         for stg_elem, vios in self.instance_disk_iter(instance,
                                                       lpar_wrap=lpar_wrap):
-            msg_args['disk_name'] = stg_elem.name
-            msg_args['vios_name'] = vios.name
+            msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
             # Create a new mapping. NOTE: If there's an existing mapping on
             # the other VIOS but not this one, we'll create a second mapping
@@ -218,23 +216,21 @@ class DiskAdapter(object):
             # alternative would be always checking all VIOSes for existing
             # mappings, which increases the response time of the common case by
             # an entire GET of VIOS+VIO_SMAP.
-            LOG.debug("Mapping boot disk %(disk_name)s of instance "
-                      "%(instance_name)s to the management partition from "
-                      "Virtual I/O Server %(vios_name)s.", msg_args)
+            LOG.debug("Mapping boot disk %(disk_name)s to the management "
+                      "partition from Virtual I/O Server %(vios_name)s.",
+                      msg_args, instance=instance)
             try:
                 tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid,
                                           stg_elem)
                 # If that worked, we're done. add_vscsi_mapping logged.
                 return stg_elem, vios
-            except Exception as e:
-                msg_args['exc'] = e
-                LOG.warning("Failed to map boot disk %(disk_name)s of "
-                            "instance %(instance_name)s to the management "
-                            "partition from Virtual I/O Server "
-                            "%(vios_name)s: %(exc)s", msg_args)
+            except Exception:
+                LOG.exception("Failed to map boot disk %(disk_name)s to the "
+                              "management partition from Virtual I/O Server "
+                              "%(vios_name)s.", msg_args, instance=instance)
                 # Try the next hit, if available.
         # We either didn't find the boot dev, or failed all attempts to map it.
-        raise npvmex.InstanceDiskMappingFailed(**msg_args)
+        raise npvmex.InstanceDiskMappingFailed(instance_name=instance.name)

     def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
         """Disconnect a disk from the management partition.

View File

@@ -1,5 +1,5 @@
 # Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -155,8 +155,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
         # Make sure the remove function will run within the transaction manager
         def rm_func(vios_w):
-            LOG.info("Disconnecting instance %(inst)s from storage "
-                     "disks.", {'inst': instance.name})
+            LOG.info("Disconnecting instance from storage disks.",
+                     instance=instance)
             return tsk_map.remove_maps(vios_w, lpar_uuid,
                                        match_func=match_func)
@@ -269,9 +269,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
             self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])

         def add_func(vios_w):
-            LOG.info("Adding logical volume disk connection between VM "
-                     "%(vm)s and VIOS %(vios)s.",
-                     {'vm': instance.name, 'vios': vios_w.name})
+            LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
+                     {'vios': vios_w.name}, instance=instance)
             mapping = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, lpar_uuid, disk_info)
             return tsk_map.add_map(vios_w, mapping)
@@ -282,15 +281,16 @@ class LocalStorage(disk_dvr.DiskAdapter):
         if stg_ftsk.name == 'localdisk':
             stg_ftsk.execute()

-    def _validate_resizable(self, vdisk):
+    @staticmethod
+    def _validate_resizable(vdisk):
         """Validates that VDisk supports resizing

         :param vdisk: The VDisk to be resized
         :raise ResizeError: If resizing is not supported for the given VDisk.
         """
-        if (vdisk.backstore_type == pvm_stg.BackStoreType.USER_QCOW):
+        if vdisk.backstore_type == pvm_stg.BackStoreType.USER_QCOW:
             raise nova_exc.ResizeError(
-                reason=_("Resizing file-backed instances is not currently"
+                reason=_("Resizing file-backed instances is not currently "
                          "supported."))

     def extend_disk(self, instance, disk_info, size):
@@ -327,7 +327,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
         # Get the disk name based on the instance and type
         vol_name = self._get_disk_name(disk_info['type'], instance, short=True)

-        LOG.info('Extending disk: %s', vol_name)
+        LOG.info('Extending disk: %s', vol_name, instance=instance)
         try:
             _extend()
         except pvm_exc.Error:

View File

@@ -69,9 +69,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         self.ssp_name = self._ssp.name
         self.tier_name = self._tier.name
-        LOG.info("SSP Storage driver initialized. "
-                 "Cluster '%(clust_name)s'; SSP '%(ssp_name)s'; "
-                 "Tier '%(tier_name)s",
+        LOG.info("SSP Storage driver initialized. Cluster '%(clust_name)s'; "
+                 "SSP '%(ssp_name)s'; Tier '%(tier_name)s",
                  {'clust_name': self.clust_name, 'ssp_name': self.ssp_name,
                   'tier_name': self.tier_name})
@@ -140,9 +139,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         # Delay run function to remove the mapping between the VM and the LU
         def rm_func(vios_w):
-            LOG.info("Removing SSP disk connection between VM %(vm)s and "
-                     "VIOS %(vios)s.",
-                     {'vm': instance.name, 'vios': vios_w.name})
+            LOG.info("Removing SSP disk connection to VIOS %(vios)s.",
+                     {'vios': vios_w.name}, instance=instance)
             return tsk_map.remove_maps(vios_w, lpar_uuid,
                                        match_func=match_func)
@@ -200,7 +198,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
     def _create_disk_from_image(self, context, instance, image_meta,
                                 image_type=disk_drv.DiskType.BOOT):
-        """Creates a boot disk and links the specified image to it.
+        """Creates a disk and copies the specified image to it.

         If the specified image has not already been uploaded, an Image LU is
         created for it. A Disk LU is then created for the instance and linked
@@ -213,10 +211,9 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         :param image_type: The image type. See disk_drv.DiskType.
         :return: The backing pypowervm LU storage object that was created.
         """
-        LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s '
-                 'for instance %(instance_uuid)s.',
-                 dict(image_type=image_type, image_id=image_meta.id,
-                      instance_uuid=instance.uuid))
+        LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s.',
+                 dict(image_type=image_type, image_id=image_meta.id),
+                 instance=instance)

         image_lu = tsk_cs.get_or_upload_image_lu(
             self._tier, self._get_image_name(image_meta),
@@ -225,7 +222,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
             image_meta.size, upload_type=tsk_stg.UploadType.IO_STREAM)

         boot_lu_name = self._get_disk_name(image_type, instance)
-        LOG.info('SSP: Disk name is %s', boot_lu_name)
+        LOG.info('SSP: Disk name is %s', boot_lu_name, instance=instance)

         return tsk_stg.crt_lu(
             self._tier, boot_lu_name, instance.flavor.root_gb,
@@ -262,9 +259,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
         # This is the delay apply mapping
         def add_func(vios_w):
-            LOG.info("Adding SSP disk connection between VM %(vm)s and "
-                     "VIOS %(vios)s.",
-                     {'vm': instance.name, 'vios': vios_w.name})
+            LOG.info("Adding SSP disk connection to VIOS %(vios)s.",
+                     {'vios': vios_w.name}, instance=instance)
             mapping = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, lpar_uuid, lu)
             return tsk_map.add_map(vios_w, mapping)

View File

@@ -219,8 +219,7 @@ class PowerVMDriver(driver.ComputeDriver):
         LOG.info('Operation: %(op)s. Virtual machine display name: '
                  '%(display_name)s, name: %(name)s',
                  {'op': op, 'display_name': instance.display_name,
-                  'name': instance.name},
-                 instance=instance)
+                  'name': instance.name}, instance=instance)

     def get_info(self, instance):
         """Get the current status of an instance, by name (not ID!)
@@ -627,7 +626,7 @@ class PowerVMDriver(driver.ComputeDriver):
                       "%(lpar_uuid)s from VIOS %(vios_name)s.",
                       {'num_maps': len(removals),
                        'lpar_uuid': pvm_inst_uuid,
-                       'vios_name': vwrap.name})
+                       'vios_name': vwrap.name}, instance=instance)
             return removals

         stg_ftsk.add_functor_subtask(_rm_vscsi_maps)
@@ -852,12 +851,12 @@ class PowerVMDriver(driver.ComputeDriver):
         # Power Off the LPAR
         flow.add(tf_vm.PowerOff(self.adapter, instance))

-        # Creates the boot image.
+        # Create and populate the rescue disk.
         flow.add(tf_stg.CreateDiskForImg(
             self.disk_dvr, context, instance, image_meta,
             image_type=disk_dvr.DiskType.RESCUE))

-        # Connects up the disk to the LPAR
+        # Connect up the disk to the LPAR
         flow.add(tf_stg.ConnectDisk(self.disk_dvr, instance))

         # Last step is to power on the system.
@@ -1774,8 +1773,8 @@ class PowerVMDriver(driver.ComputeDriver):
             vol_cls = vol_attach.get_volume_class(driver_type)
             xags.update(set(vol_cls.min_xags()))
-        LOG.debug('Instance XAGs for VM %(inst)s is %(xags)s.',
-                  {'inst': instance.name, 'xags': ','.join(xags)})
+        LOG.debug('Instance XAGs: %(xags)s.', {'xags': ','.join(xags)},
+                  instance=instance)
         return list(xags)

     def _get_boot_connectivity_type(self, context, bdms, block_device_info):
@@ -1798,7 +1797,8 @@ class PowerVMDriver(driver.ComputeDriver):
         else:
             return boot_conn_type

-    def _get_connectivity_type(self, bdm):
+    @staticmethod
+    def _get_connectivity_type(bdm):
         conn_info = bdm.get('connection_info')
         if 'connection-type' in conn_info['data']:
             connectivity_type = conn_info['data']['connection-type']

View File

@@ -1,4 +1,4 @@
-# Copyright 2014, 2016 IBM Corp.
+# Copyright 2014, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -99,7 +99,8 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
             if inst is None:
                 return None

-            LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid)
+            LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid,
+                      instance=inst)
             self._driver.nvram_mgr.store(inst)

         # If the state of the vm changed see if it should be handled
@@ -174,8 +175,8 @@ class PowerVMLifecycleEventHandler(object):
             pvm_state = vm.get_vm_qp(self._driver.adapter, pvm_uuid,
                                      'PartitionState')
         except exception.InstanceNotFound:
-            LOG.debug("Instance for LPAR %s was deleted while event was "
-                      "delayed.", pvm_uuid)
+            LOG.debug("LPAR %s was deleted while event was delayed.", pvm_uuid,
+                      instance=inst)
             return

         LOG.debug('New state %s for partition %s', pvm_state, pvm_uuid,

View File

@@ -1,4 +1,4 @@
-# Copyright 2014, 2015 IBM Corp.
+# Copyright 2014, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #

View File

@@ -1,4 +1,4 @@
-# Copyright 2015 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #

View File

@@ -35,6 +35,7 @@ from nova_powervm.virt.powervm import media
 from nova_powervm.virt.powervm import vif
 from nova_powervm.virt.powervm import vm
+
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
@@ -142,9 +143,8 @@ class LiveMigrationDest(LiveMigration):
         :param migrate_data: a PowerVMLiveMigrateData object
         :param vol_drvs: volume drivers for the attached volumes
         """
-        LOG.debug('Running pre live migration on destination.',
-                  instance=self.instance)
-        LOG.debug('Migration data: %s', migrate_data, instance=self.instance)
+        LOG.debug('Running pre live migration on destination. Migration data: '
+                  '%s', migrate_data, instance=self.instance)

         # Set the ssh auth key.
         mgmt_task.add_authorized_key(self.drvr.adapter,
@@ -204,7 +204,7 @@ class LiveMigrationDest(LiveMigration):
             try:
                 vol_drv.post_live_migration_at_destination(mig_vol_stor)
             except Exception:
-                LOG.exception("PowerVM error cleaning up destincation host "
+                LOG.exception("PowerVM error cleaning up destination host "
                               "after migration.", instance=self.instance)
                 # It failed.
                 raise LiveMigrationVolume(
@@ -270,7 +270,8 @@ class LiveMigrationSrc(LiveMigration):
         lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
         self.lpar_w = lpar_w

-        LOG.debug('Dest Migration data: %s', self.mig_data)
+        LOG.debug('Dest Migration data: %s', self.mig_data,
+                  instance=self.instance)

         # Check proc compatibility modes
         if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
@@ -307,7 +308,7 @@ class LiveMigrationSrc(LiveMigration):
             vol_drv.pre_live_migration_on_source(vol_data)

         self.mig_data.vol_data = vol_data
-        LOG.debug('Src Migration data: %s', self.mig_data,
+        LOG.debug('Source migration data: %s', self.mig_data,
                   instance=self.instance)

         # Create a FeedTask to scrub any orphaned mappings/storage associated
@@ -333,8 +334,8 @@ class LiveMigrationSrc(LiveMigration):
         :param context: security context
         :param migrate_data: a PowerVMLiveMigrateData object
         """
-        LOG.debug("Starting migration.", instance=self.instance)
-        LOG.debug("Migrate data: %s", migrate_data, instance=self.instance)
+        LOG.debug("Starting migration. Migrate data: %s", migrate_data,
+                  instance=self.instance)

         # The passed in mig data has more info (dest data added), so replace
         self.mig_data = migrate_data
@@ -376,8 +377,7 @@ class LiveMigrationSrc(LiveMigration):
                     trunk_to_del.delete()
         except Exception:
             with excutils.save_and_reraise_exception(logger=LOG):
-                LOG.exception("Live migration failed.",
-                              instance=self.instance)
+                LOG.exception("Live migration failed.", instance=self.instance)
         finally:
             LOG.debug("Finished migration.", instance=self.instance)
@@ -439,9 +439,8 @@ class LiveMigrationSrc(LiveMigration):
             if self.lpar_w.migration_state != 'Not_Migrating':
                 self.migration_recover()
-        except Exception as ex:
-            LOG.error("Migration recover failed with error: %s", ex,
-                      instance=self.instance)
+        except Exception:
+            LOG.exception("Migration rollback failed.", instance=self.instance)
         finally:
             LOG.debug("Finished migration rollback.", instance=self.instance)

View File

@@ -37,6 +37,7 @@ from pypowervm.wrappers import virtual_io_server as pvm_vios
 from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import vm
+
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
@@ -97,8 +98,7 @@ class ConfigDrivePowerVM(object):
         :return iso_path: The path to the ISO
         :return file_name: The file name for the ISO
         """
-        LOG.info("Creating config drive for instance: %s", instance.name,
-                 instance=instance)
+        LOG.info("Creating config drive.", instance=instance)
         extra_md = {}
         if admin_pass is not None:
             extra_md['admin_pass'] = admin_pass
@@ -121,10 +121,8 @@ class ConfigDrivePowerVM(object):
             max_len=pvm_const.MaxLen.VOPT_NAME)
         iso_path = os.path.join(im_path, file_name)
         with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
-            LOG.info("Config drive ISO being built for instance %(inst)s "
-                     "building to path %(iso_path)s.",
-                     {'inst': instance.name, 'iso_path': iso_path},
-                     instance=instance)
+            LOG.info("Config drive ISO building to path %(iso_path)s.",
+                     {'iso_path': iso_path}, instance=instance)

             # In case, if there's an OSError related failure while
             # creating config drive, retry make drive operation.
@@ -210,10 +208,8 @@ class ConfigDrivePowerVM(object):
         # Define the function to build and add the mapping
         def add_func(vios_w):
-            LOG.info("Adding cfg drive mapping for instance %(inst)s for "
-                     "Virtual I/O Server %(vios)s",
-                     {'inst': instance.name, 'vios': vios_w.name},
-                     instance=instance)
+            LOG.info("Adding config drive mapping to Virtual I/O Server "
+                     "%(vios)s", {'vios': vios_w.name}, instance=instance)
             mapping = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
                                                   lpar_uuid, vopt)
             return tsk_map.add_map(vios_w, mapping)

View File

@@ -1,4 +1,4 @@
-# Copyright 2015 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -34,6 +34,7 @@ import retrying
 from nova_powervm.virt.powervm import exception as npvmex
+
 LOG = logging.getLogger(__name__)

 _MP_UUID = None

View File

@@ -1,4 +1,4 @@
-# Copyright 2016 IBM Corp.
+# Copyright 2016, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -119,10 +119,9 @@ class NvramManager(object):
         # Remove it from the store
         try:
             self._api.delete(instance)
-        except Exception as e:
+        except Exception:
             # Delete exceptions should not end the operation
-            LOG.warning('Could not delete NVRAM: %s', e,
-                        instance=instance)
+            LOG.exception('Could not delete NVRAM.', instance=instance)

     @lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
     def _add_to_list(self, instance):
@@ -166,7 +165,7 @@ class NvramManager(object):
         self._pop_from_list(uuid=instance.uuid)

         try:
-            LOG.debug('Updating NVRAM for instance: %s', instance.uuid)
+            LOG.debug('Updating NVRAM.', instance=instance)
             data = vm.get_instance_wrapper(
                 self._adapter, instance, xag=[pvm_const.XAG.NVRAM]).nvram
             LOG.debug('NVRAM for instance: %s', data, instance=instance)

View File

@@ -1,4 +1,4 @@
-# Copyright 2016 IBM Corp.
+# Copyright 2016, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -33,6 +33,7 @@ from oslo_utils import excutils
 from swiftclient import exceptions as swft_exc
 from swiftclient import service as swft_srv
+
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
@@ -126,7 +127,8 @@ class SwiftNvramStore(api.NvramStore):
     def _store(self, inst_key, inst_name, data, exists=None):
         """Store the NVRAM into the storage service.

-        :param instance: instance object
+        :param inst_key: The key by which to store the data in the repository.
+        :param inst_name: The name of the instance.
         :param data: the NVRAM data base64 encoded string
         :param exists: (Optional, Default: None) If specified, tells the upload
             whether or not the object exists. Should be a boolean
@@ -216,12 +218,11 @@ class SwiftNvramStore(api.NvramStore):
                 data = data.encode('ascii')
             md5 = hashlib.md5(data).hexdigest()
             if existing_hash == md5:
-                LOG.info('NVRAM has not changed for instance: %s',
-                         instance.name, instance=instance)
+                LOG.info('NVRAM has not changed.', instance=instance)
                 return

         self._store(instance.uuid, instance.name, data, exists=exists)
-        LOG.debug('NVRAM updated for instance: %s', instance.name)
+        LOG.debug('NVRAM updated', instance=instance)

     def store_slot_map(self, inst_key, data):
         """Store the Slot Map to Swift.

View File

@@ -1,4 +1,4 @@
-# Copyright 2016 IBM Corp.
+# Copyright 2016, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -24,6 +24,7 @@ from pypowervm.tasks import storage as pvm_tstor
 from nova_powervm.virt.powervm import exception as p_exc
+
 LOG = logging.getLogger(__name__)

 _SLOT_KEY = "CLIENT_SLOT_DATA"
@@ -177,12 +178,10 @@ class SwiftSlotManager(NovaSlotManager):
             self.store_api.delete_slot_map(key)
         except Exception:
             LOG.warning("Unable to delete the slot map from Swift backing "
-                        "store with ID %(key)s. Will require "
-                        "manual cleanup.", {'key': key},
-                        instance=self.instance)
+                        "store with ID %(key)s. Will require manual cleanup.",
+                        {'key': key}, instance=self.instance)


 class NoopSlotManager(NovaSlotManager):
     """No op Slot Map (for when Swift is not used - which is standard)."""

     pass

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -19,6 +19,7 @@ from taskflow import task
 from nova_powervm.virt.powervm import image
+
 LOG = logging.getLogger(__name__)
@@ -73,10 +74,9 @@ class StreamToGlance(task.Task):
     def execute(self, disk_path):
         metadata = image.snapshot_metadata(self.context, self.image_api,
                                            self.image_id, self.instance)
-        LOG.info("Starting stream of boot device for instance %(inst)s "
-                 "(local blockdev %(devpath)s) to glance image "
-                 "%(img_id)s.",
-                 {'inst': self.instance.name, 'devpath': disk_path,
-                  'img_id': self.image_id}, instance=self.instance)
+        LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
+                 "to glance image %(img_id)s.",
+                 {'devpath': disk_path, 'img_id': self.image_id},
+                 instance=self.instance)
         image.stream_blockdev_to_glance(self.context, self.image_api,
                                         self.image_id, metadata, disk_path)

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -27,6 +27,7 @@ from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import vif
 from nova_powervm.virt.powervm import vm
+
 LOG = logging.getLogger(__name__)

 CONF = cfg.CONF
@@ -60,11 +61,9 @@ class UnplugVifs(task.Task):
         # error up front.
         modifiable, reason = lpar_wrap.can_modify_io()
         if not modifiable:
-            LOG.error('Unable to remove VIFs from instance %(inst)s '
-                      'because the system is not in a correct state. '
-                      'The reason reported by the system is: %(reason)s',
-                      {'inst': self.instance.name, 'reason': reason},
-                      instance=self.instance)
+            LOG.error("Unable to remove VIFs in the instance's current state. "
+                      "The reason reported by the system is: %(reason)s",
+                      {'reason': reason}, instance=self.instance)
             raise exception.VirtualInterfaceUnplugException(reason=reason)

         # Get all the current Client Network Adapters (CNA) on the VM itself.
@@ -149,12 +148,9 @@ class PlugVifs(task.Task):
         # Check to see if the LPAR is OK to add VIFs to.
         modifiable, reason = lpar_wrap.can_modify_io()
         if not modifiable and self.crt_network_infos:
-            LOG.error('Unable to create VIF(s) for instance %(sys)s. The '
-                      'VM was in a state where VIF plugging is not '
-                      'acceptable. The reason from the system is: '
-                      '%(reason)s',
-                      {'sys': self.instance.name, 'reason': reason},
-                      instance=self.instance)
+            LOG.error("Unable to create VIF(s) in the instance's current "
+                      "state. The reason reported by the system is: "
+                      "%(reason)s", {'reason': reason}, instance=self.instance)
             raise exception.VirtualInterfaceCreateException()

         # TODO(KYLEH): We're setting up to wait for an instance event. The
@@ -177,9 +173,8 @@ class PlugVifs(task.Task):
         # not wait for the neutron event as that likely won't be sent (it was
         # already done).
         for network_info in self.update_network_infos:
-            LOG.info("Updating VIF with mac %(mac)s for instance %(sys)s",
-                     {'mac': network_info['address'],
-                      'sys': self.instance.name}, instance=self.instance)
+            LOG.info("Updating VIF with mac %(mac)s",
+                     {'mac': network_info['address']}, instance=self.instance)
             vif.plug(self.adapter, self.host_uuid, self.instance,
                      network_info, self.slot_mgr, new_vif=False)
@@ -190,10 +185,8 @@ class PlugVifs(task.Task):
                     deadline=CONF.vif_plugging_timeout,
                     error_callback=self._vif_callback_failed):
                 for network_info in self.crt_network_infos:
-                    LOG.info('Creating VIF with mac %(mac)s for instance '
-                             '%(sys)s',
-                             {'mac': network_info['address'],
-                              'sys': self.instance.name},
+                    LOG.info('Creating VIF with mac %(mac)s.',
+                             {'mac': network_info['address']},
                              instance=self.instance)
                     new_vif = vif.plug(
                         self.adapter, self.host_uuid, self.instance,
@@ -202,8 +195,7 @@ class PlugVifs(task.Task):
                                   pvm_net.CNA):
                         self.cnas.append(new_vif)
         except eventlet.timeout.Timeout:
-            LOG.error('Error waiting for VIF to be created for instance '
-                      '%(sys)s', {'sys': self.instance.name},
+            LOG.error('Error waiting for VIF to be created.',
                       instance=self.instance)
             raise exception.VirtualInterfaceCreateException()
         finally:
@@ -216,9 +208,8 @@ class PlugVifs(task.Task):
         return self.cnas

     def _vif_callback_failed(self, event_name, instance):
-        LOG.error('VIF Plug failure for callback on event '
-                  '%(event)s for instance %(uuid)s',
-                  {'event': event_name, 'uuid': instance.uuid})
+        LOG.error('VIF plug failure for callback on event %(event)s.',
+                  {'event': event_name}, instance=instance)
         if CONF.vif_plugging_is_fatal:
             raise exception.VirtualInterfaceCreateException()
@@ -247,8 +238,8 @@ class PlugVifs(task.Task):
         # The parameters have to match the execute method, plus the response +
         # failures even if only a subset are used.
-        LOG.warning('VIF creation being rolled back for instance %(inst)s',
-                    {'inst': self.instance.name}, instance=self.instance)
+        LOG.warning('VIF creation is being rolled back.',
+                    instance=self.instance)

         # Get the current adapters on the system
         cna_w_list = vm.get_cnas(self.adapter, self.instance)
@@ -293,20 +284,19 @@ class PlugMgmtVif(task.Task):
         # return None because the Config Drive step (which may be used...may
         # not be) required the mgmt vif.
         if not CONF.powervm.use_rmc_mgmt_vif:
-            LOG.debug('No management VIF created for instance %s as the conf '
-                      'option use_rmc_mgmt_vif is set to False',
-                      self.instance.name)
+            LOG.debug('No management VIF created because '
+                      'CONF.powervm.use_rmc_mgmt_vif is False',
+                      instance=self.instance)
             return None

-        LOG.info('Plugging the Management Network Interface to instance %s',
-                 self.instance.name, instance=self.instance)
+        LOG.info('Plugging the management network interface.',
+                 instance=self.instance)

         # Determine if we need to create the secure RMC VIF. This should only
         # be needed if there is not a VIF on the secure RMC vSwitch
         vswitch = vif.get_secure_rmc_vswitch(self.adapter, self.host_uuid)
         if vswitch is None:
-            LOG.warning('No management VIF created for instance %s due to '
-                        'lack of Management Virtual Switch',
-                        self.instance.name)
+            LOG.warning('No management VIF created due to lack of management '
+                        'virtual switch', instance=self.instance)
             return None

         # This next check verifies that there are no existing NICs on the
@@ -318,8 +308,7 @@ class PlugMgmtVif(task.Task):
         has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas]
         if has_mgmt_vif:
-            LOG.debug('Management VIF already created for instance %s',
-                      self.instance.name)
+            LOG.debug('Management VIF already exists.', instance=self.instance)
             return None

         # Return the created management CNA

View File

@@ -1,4 +1,4 @@
-# Copyright 2016 IBM Corp.
+# Copyright 2016, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -18,6 +18,7 @@
 from oslo_log import log as logging
 from taskflow import task
+
 LOG = logging.getLogger(__name__)
@@ -42,8 +43,8 @@ class SaveSlotStore(task.Task):
         super(SaveSlotStore, self).__init__('save_slot_store')

     def execute(self):
-        LOG.debug("Topology for instance %(inst)s: %(topo)s",
-                  {'inst': self.instance.name, 'topo': self.slot_mgr.topology})
+        LOG.debug("Topology: %(topo)s", {'topo': self.slot_mgr.topology},
+                  instance=self.instance)
         self.slot_mgr.save()

View File

@@ -1,4 +1,4 @@
-# Copyright 2015 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -14,8 +14,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import six
-
 from pypowervm.tasks import scsi_mapper as pvm_smap

 from oslo_log import log as logging
@@ -27,6 +25,7 @@ from nova_powervm.virt.powervm import exception as npvmex
 from nova_powervm.virt.powervm import media
 from nova_powervm.virt.powervm import mgmt
+
 LOG = logging.getLogger(__name__)
@@ -49,15 +48,15 @@ class ConnectVolume(task.Task):
         super(ConnectVolume, self).__init__('connect_vol_%s' % self.vol_id)

     def execute(self):
-        LOG.info('Connecting volume %(vol)s to instance %(inst)s',
-                 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
+        LOG.info('Connecting volume %(vol)s.', {'vol': self.vol_id},
+                 instance=self.vol_drv.instance)
         self.vol_drv.connect_volume(self.slot_mgr)

     def revert(self, result, flow_failures):
         # The parameters have to match the execute method, plus the response +
         # failures even if only a subset are used.
-        LOG.warning('Volume %(vol)s for instance %(inst)s to be disconnected',
-                    {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
+        LOG.warning('Rolling back connection for volume %(vol)s.',
+                    {'vol': self.vol_id}, instance=self.vol_drv.instance)

         # Note that the rollback is *instant*. Resetting the FeedTask ensures
         # immediate rollback.
@@ -68,13 +67,11 @@ class ConnectVolume(task.Task):
             # was connected. This attempts to clear anything out to make sure
             # the terminate connection runs smoothly.
             self.vol_drv.disconnect_volume(self.slot_mgr)
-        except npvmex.VolumeDetachFailed as e:
+        except npvmex.VolumeDetachFailed:
             # Only log that the volume detach failed. Should not be blocking
             # due to being in the revert flow.
-            LOG.warning("Unable to disconnect volume for %(inst)s during "
-                        "rollback. Error was: %(error)s",
-                        {'inst': self.vol_drv.instance.name,
-                         'error': e.message})
+            LOG.exception("Unable to disconnect volume %s during rollback.",
+                          self.vol_id, instance=self.vol_drv.instance)


 class DisconnectVolume(task.Task):
@@ -97,15 +94,15 @@ class DisconnectVolume(task.Task):
                                                'disconnect_vol_%s' % self.vol_id)

     def execute(self):
-        LOG.info('Disconnecting volume %(vol)s from instance %(inst)s',
-                 {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
+        LOG.info('Disconnecting volume %(vol)s.',
+                 {'vol': self.vol_id}, instance=self.vol_drv.instance)
         self.vol_drv.disconnect_volume(self.slot_mgr)

     def revert(self, result, flow_failures):
         # The parameters have to match the execute method, plus the response +
         # failures even if only a subset are used.
-        LOG.warning('Volume %(vol)s for instance %(inst)s to be re-connected',
-                    {'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
+        LOG.warning('Reconnecting volume %(vol)s on disconnect rollback.',
+                    {'vol': self.vol_id}, instance=self.vol_drv.instance)

         # Note that the rollback is *instant*. Resetting the FeedTask ensures
         # immediate rollback.
@@ -117,13 +114,11 @@ class DisconnectVolume(task.Task):
             # in error scenarios. This is simply useful for debug purposes
             # if there is an operational error.
             self.vol_drv.connect_volume(self.slot_mgr)
-        except npvmex.VolumeAttachFailed as e:
+        except npvmex.VolumeAttachFailed:
             # Only log that the volume attach failed. Should not be blocking
             # due to being in the revert flow. See comment above.
-            LOG.warning("Unable to re-connect volume for %(inst)s during "
-                        "rollback. Error was: %(error)s",
-                        {'inst': self.vol_drv.instance.name,
-                         'error': e.message})
+            LOG.exception("Unable to reconnect volume %s during rollback.",
+                          self.vol_id, instance=self.vol_drv.instance)


 class CreateDiskForImg(task.Task):
@@ -260,7 +255,7 @@ class InstanceDiskToMgmt(task.Task):
         # Scan the SCSI bus, discover the disk, find its canonical path.
         LOG.info("Discovering device and path for mapping of %(dev_name)s "
                  "on the management partition.",
-                 {'dev_name': self.stg_elem.name})
+                 {'dev_name': self.stg_elem.name}, instance=self.instance)
         self.disk_path = mgmt.discover_vscsi_disk(the_map)
         return self.stg_elem, self.vios_wrap, self.disk_path
@@ -273,20 +268,18 @@ class InstanceDiskToMgmt(task.Task):
         if self.vios_wrap is None or self.stg_elem is None:
             # We never even got connected - nothing to do
             return
-        LOG.warning("Unmapping boot disk %(disk_name)s of instance "
-                    "%(instance_name)s from management partition via "
-                    "Virtual I/O Server %(vios_name)s.",
-                    {'disk_name': self.stg_elem.name,
-                     'instance_name': self.instance.name,
-                     'vios_name': self.vios_wrap.name})
+        LOG.warning("Unmapping boot disk %(disk_name)s from the management "
+                    "partition via Virtual I/O Server %(vioname)s.",
+                    {'disk_name': self.stg_elem.name,
+                     'vioname': self.vios_wrap.name}, instance=self.instance)
         self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
                                                 self.stg_elem.name)

         if self.disk_path is None:
             # We did not discover the disk - nothing else to do.
             return
-        LOG.warning("Removing disk %(disk_path)s from the management "
-                    "partition.", {'disk_path': self.disk_path})
+        LOG.warning("Removing disk %(dpath)s from the management partition.",
+                    {'dpath': self.disk_path}, instance=self.instance)
         mgmt.remove_block_dev(self.disk_path)
@@ -330,15 +323,13 @@ class RemoveInstanceDiskFromMgmt(task.Task):
         # stg_elem is None if boot disk was not mapped to management partition
         if stg_elem is None:
             return
-        LOG.info("Unmapping boot disk %(disk_name)s of instance "
-                 "%(instance_name)s from management partition via Virtual "
-                 "I/O Server %(vios_name)s.",
-                 {'disk_name': stg_elem.name,
-                  'instance_name': self.instance.name,
-                  'vios_name': vios_wrap.name})
+        LOG.info("Unmapping boot disk %(disk_name)s from the management "
+                 "partition via Virtual I/O Server %(vios_name)s.",
+                 {'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
+                 instance=self.instance)
         self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
-        LOG.info("Removing disk %(disk_path)s from the management "
-                 "partition.", {'disk_path': disk_path})
+        LOG.info("Removing disk %(disk_path)s from the management partition.",
+                 {'disk_path': disk_path}, instance=self.instance)
         mgmt.remove_block_dev(disk_path)
@@ -397,10 +388,9 @@ class CreateAndConnectCfgDrive(task.Task):
         # Delete the virtual optical media. If it fails we don't care.
         try:
             self.mb.dlt_vopt(lpar_wrap.uuid)
-        except Exception as e:
-            LOG.warning('Vopt removal as part of spawn reversion failed '
-                        'with: %(exc)s', {'exc': six.text_type(e)},
-                        instance=self.instance)
+        except Exception:
+            LOG.exception('VOpt removal (as part of reversion) failed.',
+                          instance=self.instance)


 class DeleteVOpt(task.Task):
@@ -501,9 +491,8 @@ class SaveBDM(task.Task):
         super(SaveBDM, self).__init__('save_bdm_%s' % self.bdm.volume_id)

     def execute(self):
-        LOG.info('Saving block device mapping for volume id %(vol_id)s '
-                 'on instance %(inst)s.',
-                 {'vol_id': self.bdm.volume_id, 'inst': self.instance.name})
+        LOG.info('Saving block device mapping for volume id %(vol_id)s.',
+                 {'vol_id': self.bdm.volume_id}, instance=self.instance)
         self.bdm.save()
@@ -558,6 +547,7 @@ class ExtendDisk(task.Task):
         super(ExtendDisk, self).__init__('extend_disk_%s' % disk_info['type'])

     def execute(self):
-        LOG.info('Extending disk size of disk: %(disk)s size: %(size)s.',
-                 {'disk': self.disk_info['type'], 'size': self.size})
+        LOG.info('Extending %(disk_type)s disk to %(size)s GB.',
+                 {'disk_type': self.disk_info['type'], 'size': self.size},
+                 instance=self.instance)
         self.disk_dvr.extend_disk(self.instance, self.disk_info, self.size)

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -98,8 +98,7 @@ class Create(task.Task):
     def execute(self):
         data = None
         if self.nvram_mgr is not None:
-            LOG.info('Fetching NVRAM for instance %s.',
-                     self.instance.name, instance=self.instance)
+            LOG.info('Fetching NVRAM.', instance=self.instance)
             data = self.nvram_mgr.fetch(self.instance)
             LOG.debug('NVRAM data is: %s', data, instance=self.instance)
@@ -114,8 +113,7 @@ class Create(task.Task):
         # build map earlier in the spawn, just before the LPAR is created.
         # Only rebuilds should be passing in None for stg_ftsk.
         if self.stg_ftsk.name == 'create_scrubber':
-            LOG.info('Scrubbing storage for instance %s as part of '
-                     'rebuild.', self.instance.name,
-                     instance=self.instance)
+            LOG.info('Scrubbing storage as part of rebuild.',
+                     instance=self.instance)
             self.stg_ftsk.execute()
@@ -202,11 +200,12 @@ class PowerOn(task.Task):
         vm.power_on(self.adapter, self.instance, opts=self.pwr_opts)

     def revert(self, result, flow_failures):
-        LOG.warning('Powering off instance: %s', self.instance.name)
+        LOG.warning('Rolling back power-on.', instance=self.instance)

         if isinstance(result, task_fail.Failure):
             # The power on itself failed...can't power off.
-            LOG.debug('Power on failed. Not performing power off.')
+            LOG.debug('Power on failed. Not performing power off.',
+                      instance=self.instance)
             return

         vm.power_off(self.adapter, self.instance, force_immediate=True)
@@ -220,7 +219,6 @@ class PowerOff(task.Task):
         """Creates the Task to power off an LPAR.

         :param adapter: The adapter for the pypowervm API
-        :param lpar_uuid: The UUID of the lpar that has media.
         :param instance: The nova instance.
         :param force_immediate: Boolean. Perform a VSP hard power off.
         """
@@ -257,8 +255,7 @@ class StoreNvram(task.Task):
         try:
             self.nvram_mgr.store(self.instance, immediate=self.immediate)
         except Exception:
-            LOG.exception('Unable to store NVRAM for instance %(name)s.',
-                          {'name': self.instance.name}, instance=self.instance)
+            LOG.exception('Unable to store NVRAM.', instance=self.instance)


 class DeleteNvram(task.Task):
@@ -280,13 +277,11 @@ class DeleteNvram(task.Task):
             LOG.info("No op for NVRAM delete.", instance=self.instance)
             return

-        LOG.info('Deleting NVRAM for instance: %s',
-                 self.instance.name, instance=self.instance)
+        LOG.info('Deleting NVRAM', instance=self.instance)

         try:
             self.nvram_mgr.remove(self.instance)
         except Exception:
-            LOG.exception('Unable to delete NVRAM for instance %(name)s.',
-                          {'name': self.instance.name}, instance=self.instance)
+            LOG.exception('Unable to delete NVRAM.', instance=self.instance)


 class Delete(task.Task):
@@ -297,7 +292,6 @@ class Delete(task.Task):
         """Create the Task to delete the VM from the system.

         :param adapter: The adapter for the pypowervm API.
-        :param lpar_uuid: The VM's PowerVM UUID.
         :param instance: The nova instance.
         """
         super(Delete, self).__init__('dlt_vm')
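
The PowerOn.revert hunk above leans on taskflow's revert protocol: when a flow fails, each task's revert() receives that task's own result, and if the task's execute() was what raised, taskflow passes a taskflow.types.failure.Failure object instead of a return value. A minimal sketch under those assumptions (power_on and power_off are hypothetical stand-ins for the vm module helpers):

    from taskflow import task
    from taskflow.types import failure as task_fail


    def power_on(adapter, instance):
        """Hypothetical stand-in for vm.power_on."""


    def power_off(adapter, instance, force_immediate=True):
        """Hypothetical stand-in for vm.power_off."""


    class PowerOn(task.Task):
        def __init__(self, adapter, instance):
            super(PowerOn, self).__init__('pwr_vm')
            self.adapter = adapter
            self.instance = instance

        def execute(self):
            power_on(self.adapter, self.instance)

        def revert(self, result, flow_failures):
            # If execute() itself raised, result is a Failure and the VM
            # never powered on, so there is nothing to undo.
            if isinstance(result, task_fail.Failure):
                return
            power_off(self.adapter, self.instance, force_immediate=True)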

View File

@@ -42,6 +42,7 @@ from pypowervm.wrappers import network as pvm_net
 from nova_powervm.virt.powervm.i18n import _
 from nova_powervm.virt.powervm import vm
+
 LOG = log.getLogger(__name__)

 SECURE_RMC_VSWITCH = 'MGMTSWITCH'
@@ -368,25 +369,18 @@ class PvmVifDriver(object):
         cna_w = self._find_cna_for_vif(cna_w_list, vif)

         if not cna_w:
-            LOG.warning('Unable to unplug VIF with mac %(mac)s for '
-                        'instance %(inst)s. The VIF was not found on '
-                        'the instance.',
-                        {'mac': vif['address'], 'inst': self.instance.name},
-                        instance=self.instance)
+            LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was '
+                        'not found on the instance.',
+                        {'mac': vif['address']}, instance=self.instance)
             return None

-        LOG.info('Deleting VIF with mac %(mac)s for instance %(inst)s.',
-                 {'mac': vif['address'], 'inst': self.instance.name},
-                 instance=self.instance)
+        LOG.info('Deleting VIF with mac %(mac)s.',
+                 {'mac': vif['address']}, instance=self.instance)
         try:
             cna_w.delete()
         except Exception as e:
-            LOG.error('Unable to unplug VIF with mac %(mac)s for instance '
-                      '%(inst)s.',
-                      {'mac': vif['address'], 'inst': self.instance.name},
-                      instance=self.instance)
-            LOG.exception("PowerVM error during vif unplug.",
-                          instance=self.instance)
+            LOG.exception('Unable to unplug VIF with mac %(mac)s.',
+                          {'mac': vif['address']}, instance=self.instance)
             raise exception.VirtualInterfaceUnplugException(
                 reason=six.text_type(e))
         return cna_w
@@ -485,7 +479,8 @@ class PvmSeaVifDriver(PvmVifDriver):
         if not vlan:
             vlan = int(vif['details']['vlan'])

-        LOG.debug("Creating SEA based VIF with VLAN %s", str(vlan))
+        LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan),
+                  instance=self.instance)
         cna_w = pvm_cna.crt_cna(self.adapter, self.host_uuid, lpar_uuid, vlan,
                                 mac_addr=vif['address'], slot_num=slot_num)
@@ -600,11 +595,9 @@ class PvmLBVifDriver(PvmLioVifDriver):
         # Find the CNA for this vif.
         cna_w = self._find_cna_for_vif(cna_w_list, vif)
         if not cna_w:
-            LOG.warning('Unable to unplug VIF with mac %(mac)s for '
-                        'instance %(inst)s. The VIF was not found on '
-                        'the instance.',
-                        {'mac': vif['address'], 'inst': self.instance.name},
-                        instance=self.instance)
+            LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was '
+                        'not found on the instance.',
+                        {'mac': vif['address']}, instance=self.instance)
             return None

         # Find and delete the trunk adapters
@@ -615,12 +608,11 @@ class PvmLBVifDriver(PvmLioVifDriver):
         try:
             utils.execute('brctl', 'delif', vif['network']['bridge'],
                           dev_name, run_as_root=True)
-        except Exception as e:
-            LOG.warning('Unable to delete device %(dev_name)s from bridge '
-                        '%(bridge)s. Error: %(error)s',
-                        {'dev_name': dev_name,
-                         'bridge': vif['network']['bridge'],
-                         'error': e.message}, instance=self.instance)
+        except Exception:
+            LOG.exception(
+                'Unable to delete device %(dev_name)s from bridge %(bridge)s.',
+                {'dev_name': dev_name, 'bridge': vif['network']['bridge']},
+                instance=self.instance)

         for trunk in trunks:
             trunk.delete()
@@ -648,10 +640,8 @@ class PvmVnicSriovVifDriver(PvmVifDriver):
         network = napi.get(admin_context, net_id)
         physnet = network.physical_network

-        LOG.debug("Plugging vNIC SR-IOV vif for physical network "
-                  "'%(physnet)s' into instance %(inst)s.",
-                  {'physnet': physnet, 'inst': self.instance.name},
-                  instance=self.instance)
+        LOG.debug("Plugging vNIC SR-IOV vif for physical network %(physnet)s.",
+                  {'physnet': physnet}, instance=self.instance)

         # Get the msys
         msys = pvm_ms.System.get(self.adapter)[0]
@@ -697,11 +687,9 @@ class PvmVnicSriovVifDriver(PvmVifDriver):
         vnic = vm.get_vnics(
             self.adapter, self.instance, mac=mac, one_result=True)
         if not vnic:
-            LOG.warning('Unable to unplug VIF with mac %(mac)s for '
-                        'instance %(inst)s. No matching vNIC was found '
-                        'on the instance. VIF: %(vif)s',
-                        {'mac': mac, 'inst': self.instance.name, 'vif': vif},
-                        instance=self.instance)
+            LOG.warning('Unable to unplug VIF with mac %(mac)s. No matching '
+                        'vNIC was found on the instance. VIF: %(vif)s',
+                        {'mac': mac, 'vif': vif}, instance=self.instance)
             return None
         vnic.delete()
         return vnic
@@ -770,11 +758,9 @@ class PvmOvsVifDriver(PvmLioVifDriver):
         # Find the CNA for this vif.
         cna_w = self._find_cna_for_vif(cna_w_list, vif)
         if not cna_w:
-            LOG.warning('Unable to unplug VIF with mac %(mac)s for '
-                        'instance %(inst)s. The VIF was not found on '
-                        'the instance.',
-                        {'mac': vif['address'], 'inst': self.instance.name},
-                        instance=self.instance)
+            LOG.warning('Unable to unplug VIF with mac %(mac)s for. The VIF '
+                        'was not found on the instance.',
+                        {'mac': vif['address']}, instance=self.instance)
             return None

         # Find and delete the trunk adapters
@@ -822,8 +808,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):
             # Save this data for the migration command.
             vea_vlan_mappings[vif['address']] = cna_w.pvid
-            LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with "
-                     "PVID %(pvid)s",
+            LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with PVID "
+                     "%(pvid)s",
                      {'mac': vif['address'], 'dev': dev, 'pvid': cna_w.pvid},
                      instance=self.instance)

View File

@@ -1,4 +1,4 @@
-# Copyright 2014, 2015, 2016 IBM Corp.
+# Copyright 2014, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -46,6 +46,7 @@ from nova_powervm import conf as cfg
 from nova_powervm.virt.powervm import exception as nvex
 from nova_powervm.virt.powervm.i18n import _
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -124,7 +125,8 @@ def translate_event(pvm_state, pwr_state):
     elif pvm_state in RESUMING_EVENTS and pwr_state != power_state.RUNNING:
         trans = event.EVENT_LIFECYCLE_RESUMED
-    LOG.debug('Translated Event to %s', trans)
+    LOG.debug('Translated {PowerVM state %s; power state %s} to %s',
+              pvm_state, pwr_state, trans)
     return trans
@@ -677,29 +679,27 @@ def power_off(adapter, instance, opts=None, force_immediate=False,
     entry = get_instance_wrapper(adapter, instance)
     # Get the current state and see if we can stop the VM
-    LOG.debug("Powering off request for instance %(inst)s which is in "
-              "state %(state)s. Force Immediate Flag: %(force)s.",
-              {'inst': instance.name, 'state': entry.state,
-               'force': force_immediate})
+    LOG.debug("Power off requested for instance in state %(state)s. Force "
+              "Immediate Flag: %(force)s.",
+              {'state': entry.state, 'force': force_immediate},
+              instance=instance)
     if entry.state in POWERVM_STOPABLE_STATE:
         # Now stop the lpar
         try:
-            LOG.debug("Power off executing for instance %(inst)s.",
-                      {'inst': instance.name})
+            LOG.debug("Power off executing.", instance=instance)
             kwargs = {'timeout': timeout} if timeout else {}
             force_flag = (power.Force.TRUE if force_immediate
                           else power.Force.ON_FAILURE)
             power.power_off(entry, None, force_immediate=force_flag,
                             add_parms=opts, **kwargs)
         except Exception as e:
-            LOG.exception("Failed to power of instance.",
+            LOG.exception("Failed to power off instance.",
                           instance=instance)
             raise exception.InstancePowerOffFailure(
                 reason=six.text_type(e))
         return True
     else:
-        LOG.debug("Power off not required for instance %(inst)s.",
-                  {'inst': instance.name})
+        LOG.debug("Power off not required.", instance=instance)
         return False
@@ -779,7 +779,7 @@ def get_instance(context, pvm_uuid):
     except exception.InstanceNotFound:
         pass
     except Exception as e:
-        LOG.debug('PowerVM UUID not found. %s', e)
+        LOG.debug('Instance with PowerVM UUID %s not found: %s', pvm_uuid, e)
     return None
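
The power_off hunk also shows the commit's error-handling convention: log from the except block with LOG.exception, which records the active traceback, instead of interpolating the exception into the message. A hedged sketch of that shape, where power_off_func stands in for the pypowervm power.power_off call:

import six

from nova import exception
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def stop_vm(entry, instance, power_off_func):
    try:
        power_off_func(entry)
        return True
    except Exception as e:
        # LOG.exception is only valid inside an except block; it logs at
        # ERROR level and appends the traceback, so the message stays short.
        LOG.exception("Failed to power off instance.", instance=instance)
        raise exception.InstancePowerOffFailure(reason=six.text_type(e))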

View File

@@ -19,6 +19,7 @@ from pypowervm.tasks import hdisk
 from pypowervm.tasks import partition
 from pypowervm.wrappers import virtual_io_server as pvm_vios
 # Defines the various volume connectors that can be used.
 from nova import exception
 from oslo_utils import importutils
View File

@@ -1,4 +1,4 @@
-# Copyright 2015 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -24,6 +24,7 @@ from pypowervm.wrappers import virtual_io_server as pvm_vios
 from nova_powervm.virt.powervm import exception as exc
 from nova_powervm.virt.powervm import vm
 LOCAL_FEED_TASK = 'local_feed_task'

View File

@@ -30,6 +30,7 @@ from pypowervm.tasks import partition
 from pypowervm.tasks import scsi_mapper as tsk_map
 from pypowervm.wrappers import storage as pvm_stg
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -81,7 +82,7 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
                            information to the live_migration command, it
                            should be added to this dictionary.
         """
-        LOG.debug("Incoming mig_data=%s", mig_data)
+        LOG.debug("Incoming mig_data=%s", mig_data, instance=self.instance)
         # Check if volume is available in destination.
         vol_path = self._get_path()
         if not os.path.exists(vol_path):
@@ -102,15 +103,15 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
         if vios_w.uuid not in self.vios_uuids:
             return None
-        LOG.info("Adding logical volume disk connection between VM "
-                 "%(vm)s and VIOS %(vios)s.",
-                 {'vm': self.instance.name, 'vios': vios_w.name},
-                 instance=self.instance)
+        LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
+                 {'vios': vios_w.name}, instance=self.instance)
         slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, path)
         if slot_mgr.is_rebuild and not slot:
-            LOG.debug('Detected a device with path %s on VIOS %s on the '
-                      'rebuild that did not exist on the source. '
-                      'Ignoring.', path, vios_w.uuid)
+            LOG.debug('Detected a device with path %(path)s on VIOS '
+                      '%(vios)s on the rebuild that did not exist on the '
+                      'source. Ignoring.',
+                      {'path': path, 'vios': vios_w.uuid},
+                      instance=self.instance)
             return None
         mapping = tsk_map.build_vscsi_mapping(
@@ -146,8 +147,7 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
         if vios_w.uuid not in self.vios_uuids:
             return None
-        LOG.info("Disconnecting instance %(inst)s from storage disks.",
-                 {'inst': self.instance.name}, instance=self.instance)
+        LOG.info("Disconnecting storage disks.", instance=self.instance)
         removed_maps = tsk_map.remove_maps(vios_w, self.vm_uuid,
                                            match_func=match_func)
         for rm_map in removed_maps:
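
The rebuild-path hunk above also swaps bare positional %s arguments for a single named-substitution dict, the style the rest of the tree uses. A short sketch of the two forms, purely for illustration:

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def log_rebuild_device(path, vios_uuid):
    # Positional form: argument order is fixed by the format string.
    LOG.debug('Detected a device with path %s on VIOS %s.', path, vios_uuid)
    # Named form, as adopted above: one mapping whose keys are
    # self-describing and reorderable without touching the call site.
    LOG.debug('Detected a device with path %(path)s on VIOS %(vios)s.',
              {'path': path, 'vios': vios_uuid})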

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -30,6 +30,7 @@ from taskflow import task
 import six
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
 DEVNAME_KEY = 'target_devname'
@@ -113,7 +114,8 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
                 self._set_devname(device_name)
                 self._set_udid(udid)
-                LOG.debug('Device attached: %s', device_name)
+                LOG.debug('Device attached: %s', device_name,
+                          instance=self.instance)
                 # Valid attachment
                 return True
@@ -138,7 +140,8 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
                  otherwise.
         """
         LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
-                  dict(vol=self.volume_id, uuid=vios_w.uuid))
+                  dict(vol=self.volume_id, uuid=vios_w.uuid),
+                  instance=self.instance)
         device_name = None
         try:
             device_name = self._get_devname()
@@ -158,13 +161,12 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
                           instance=self.instance)
             return False
-        except Exception as e:
-            LOG.warning(
-                "Disconnect Volume: Failed to find disk on Virtual I/O "
-                "Server %(vios_name)s for volume %(volume_id)s."
-                " Error: %(error)s",
-                {'error': e, 'vios_name': vios_w.name,
-                 'volume_id': self.volume_id}, instance=self.instance)
+        except Exception:
+            LOG.exception(
+                "Disconnect Volume: Failed to find device on Virtual I/O "
+                "Server %(vios_name)s for volume %(volume_id)s.",
+                {'vios_name': vios_w.name, 'volume_id': self.volume_id},
+                instance=self.instance)
             return False
         # We have found the device name
@@ -203,11 +205,9 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
             if not any([result['vio_modified']
                         for result in ret['wrapper_task_rets'].values()]):
                 LOG.warning(
                     "Disconnect Volume: Failed to disconnect the volume "
-                    "%(volume_id)s on ANY of the Virtual I/O Servers for "
-                    "instance %(inst)s.",
-                    {'inst': self.instance.name, 'volume_id': self.volume_id},
-                    instance=self.instance)
+                    "%(volume_id)s on ANY of the Virtual I/O Servers.",
+                    {'volume_id': self.volume_id}, instance=self.instance)
         except Exception as e:
             LOG.exception('PowerVM error detaching volume from virtual '

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -31,6 +31,7 @@ from nova_powervm.virt.powervm import exception as exc
 from nova_powervm.virt.powervm.i18n import _
 from nova_powervm.virt.powervm.volume import driver as v_driver
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -299,11 +300,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
             return False
         # At this point, it should be correct.
-        LOG.info("Instance %(inst)s has not yet defined a WWPN on "
-                 "fabric %(fabric)s. Appropriate WWPNs will be "
-                 "generated.",
-                 {'inst': self.instance.name, 'fabric': fabric},
-                 instance=self.instance)
+        LOG.info("Instance has not yet defined a WWPN on fabric %(fabric)s. "
+                 "Appropriate WWPNs will be generated.",
+                 {'fabric': fabric}, instance=self.instance)
         return True
     def _hosts_wwpn(self, port_maps):
@@ -399,10 +398,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # were already configured.
         for fabric in self._fabric_names():
             fc_state = self._get_fabric_state(fabric)
-            LOG.info(
-                "NPIV wwpns fabric state=%(st)s for instance %(inst)s",
-                {'st': fc_state, 'inst': self.instance.name},
-                instance=self.instance)
+            LOG.info("NPIV wwpns fabric state=%(st)s.",
+                     {'st': fc_state}, instance=self.instance)
             if self._is_initial_wwpn(fc_state, fabric):
                 # Get a set of WWPNs that are globally unique from the system.
@@ -468,12 +465,13 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # set of VIOSs.
         if all(pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0]
                for pm in npiv_port_maps):
-            LOG.debug("Physical ports check out - just return maps.")
+            LOG.debug("All physical ports were found on the given Virtual I/O "
+                      "Server(s).", instance=self.instance)
             return npiv_port_maps
         # If ANY of the VIOS ports were not there, rebuild the port maps
         LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.",
-                  npiv_port_maps)
+                  npiv_port_maps, instance=self.instance)
         v_wwpns = []
         for port_map in npiv_port_maps:
             v_wwpns.extend(port_map[1].split())
@@ -482,7 +480,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # Derive new maps and don't preserve existing maps
         npiv_port_maps = pvm_vfcm.derive_npiv_map(
             vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
-        LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
+        LOG.debug("Rebuilt port maps: %s", npiv_port_maps,
+                  instance=self.instance)
         self._set_fabric_meta(fabric, npiv_port_maps)
         LOG.warning("Had to update the system metadata for the WWPNs due to "
                     "incorrect physical WWPNs on fabric %(fabric)s",
@@ -583,10 +582,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                     pvm_vfcm.remove_maps, self.vm_uuid,
                     port_map=npiv_port_map, logspec=ls)
             else:
-                LOG.warning("No storage connections found between the "
-                            "Virtual I/O Servers and FC Fabric "
-                            "%(fabric)s.", {'fabric': fabric},
-                            instance=self.instance)
+                LOG.warning("No storage connections found between the Virtual "
+                            "I/O Servers and FC Fabric %(fabric)s.",
+                            {'fabric': fabric}, instance=self.instance)
     def _set_fabric_state(self, fabric, state):
         """Sets the fabric state into the instance's system metadata.
@@ -599,9 +597,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                  FS_INST_MAPPED: Fabric is mapped with the nova instance.
         """
         meta_key = self._sys_fabric_state_key(fabric)
-        LOG.info("Setting Fabric state=%(st)s for instance=%(inst)s",
-                 {'st': state, 'inst': self.instance.name},
-                 instance=self.instance)
+        LOG.info("Setting Fabric state=%(st)s.",
+                 {'st': state}, instance=self.instance)
         self.instance.system_metadata[meta_key] = state
     def _get_fabric_state(self, fabric):
@@ -651,10 +648,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
                 meta_elems.append(p_wwpn)
                 meta_elems.extend(v_wwpn.split())
-        LOG.info("Fabric %(fabric)s wwpn metadata will be set to "
-                 "%(meta)s for instance %(inst)s",
-                 {'fabric': fabric, 'meta': ",".join(meta_elems),
-                  'inst': self.instance.name},
-                 instance=self.instance)
+        LOG.info("Fabric %(fabric)s wwpn metadata will be set to %(meta)s.",
+                 {'fabric': fabric, 'meta': ",".join(meta_elems)},
+                 instance=self.instance)
         # Clear out the original metadata. We may be reducing the number of
@@ -696,12 +691,10 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         if self.instance.system_metadata.get(meta_key) is None:
             # If no mappings exist, log a warning.
-            LOG.warning("No NPIV mappings exist for instance %(inst)s on "
-                        "fabric %(fabric)s. May not have connected to "
-                        "the fabric yet or fabric configuration was "
-                        "recently modified.",
-                        {'inst': self.instance.name, 'fabric': fabric},
-                        instance=self.instance)
+            LOG.warning("No NPIV mappings exist for instance on fabric "
+                        "%(fabric)s. May not have connected to the fabric "
+                        "yet or fabric configuration was recently modified.",
+                        {'fabric': fabric}, instance=self.instance)
             return []
         wwpns = self.instance.system_metadata[meta_key]
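
Both _set_fabric_state and the metadata reads above round-trip through instance.system_metadata. A rough sketch of that persistence shape, with a hypothetical key format since _sys_fabric_state_key itself is outside this diff:

def set_fabric_state(instance, fabric, state):
    meta_key = 'npiv_fabric_state_%s' % fabric  # hypothetical key format
    instance.system_metadata[meta_key] = state


def get_fabric_state(instance, fabric, default='unmapped'):
    # Survives rebuilds and migrations because system_metadata travels
    # with the instance record, not with the host.
    meta_key = 'npiv_fabric_state_%s' % fabric  # hypothetical key format
    return instance.system_metadata.get(meta_key, default)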

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -28,6 +28,8 @@ from pypowervm.tasks import scsi_mapper as tsk_map
 from pypowervm.utils import transaction as tx
 from pypowervm.wrappers import storage as pvm_stor
 from pypowervm.wrappers import virtual_io_server as pvm_vios
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 UDID_KEY = 'target_UDID'
@@ -142,9 +144,8 @@ class VscsiVolumeAdapter(object):
                            for a particular bus, or none of them.
         """
         def add_func(vios_w):
-            LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s "
-                     "to VM %(vm)s", {'dev': device_name,
-                                      'vm': self.vm_uuid})
+            LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s",
+                     {'dev': device_name}, instance=self.instance)
             pv = pvm_stor.PV.bld(self.adapter, device_name, udid)
             v_map = tsk_map.build_vscsi_mapping(
                 self.host_uuid, vios_w, self.vm_uuid, pv,
@@ -163,7 +164,7 @@ class VscsiVolumeAdapter(object):
             # It's common to lose our specific data in the BDM. The connection
             # information can be 'refreshed' by operations like LPM and resize
             LOG.info('Failed to retrieve device_id key from BDM for volume id '
-                     '%s', self.volume_id)
+                     '%s', self.volume_id, instance=self.instance)
             return None
     def _set_udid(self, udid):
@@ -184,7 +185,7 @@ class VscsiVolumeAdapter(object):
             # It's common to lose our specific data in the BDM. The connection
             # information can be 'refreshed' by operations like LPM and resize
             LOG.info('Failed to retrieve device_id key from BDM for volume id '
-                     '%s', self.volume_id)
+                     '%s', self.volume_id, instance=self.instance)
             return None
     def _set_devname(self, devname):
@@ -205,8 +206,8 @@ class VscsiVolumeAdapter(object):
            used when a volume is detached from the VM.
         """
         def rm_func(vios_w):
-            LOG.info("Removing vSCSI mapping from Physical Volume %(dev)s "
-                     "to VM %(vm)s", {'dev': device_name, 'vm': vm_uuid})
+            LOG.info("Removing vSCSI mapping from physical volume %(dev)s.",
+                     {'dev': device_name}, instance=self.instance)
             removed_maps = tsk_map.remove_maps(
                 vios_w, vm_uuid,
                 tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@@ -229,17 +230,19 @@ class VscsiVolumeAdapter(object):
         :param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
         """
         def rm_hdisk():
-            LOG.info("Running remove for hdisk: '%s'", device_name)
+            LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
+                     "%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
+                     instance=self.instance)
             try:
                 # Attempt to remove the hDisk
                 hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
                                    vio_wrap.uuid)
-            except Exception as e:
+            except Exception:
                 # If there is a failure, log it, but don't stop the process
-                LOG.warning("There was an error removing the hdisk "
-                            "%(disk)s from the Virtual I/O Server.",
-                            {'disk': device_name})
-                LOG.warning(e)
+                LOG.exception("There was an error removing the hdisk "
+                              "%(disk)s from Virtual I/O Server %(vios)s.",
+                              {'disk': device_name, 'vios': vio_wrap.name},
+                              instance=self.instance)
         # Check if there are not multiple mapping for the device
         if not self._check_host_mappings(vio_wrap, device_name):
@@ -247,8 +250,10 @@ class VscsiVolumeAdapter(object):
             stg_ftsk = stg_ftsk or self.stg_ftsk
             stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
         else:
-            LOG.info("hdisk %(disk)s is not removed because it has "
-                     "existing storage mappings", {'disk': device_name})
+            LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
+                     "%(vios)s because it has existing storage mappings",
+                     {'disk': device_name, 'vios': vio_wrap.name},
+                     instance=self.instance)
     def _check_host_mappings(self, vios_wrap, device_name):
         """Checks if the given hdisk has multiple mappings
@@ -265,8 +270,9 @@ class VscsiVolumeAdapter(object):
             vios_scsi_mappings, None,
             tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
-        LOG.info("%(num)d Storage Mappings found for %(dev)s",
-                 {'num': len(mappings), 'dev': device_name})
+        LOG.info("%(num)d storage mappings found for %(dev)s on VIOS %(vios)s",
+                 {'num': len(mappings), 'dev': device_name,
+                  'vios': vios_wrap.name}, instance=self.instance)
         # the mapping is still present as the task feed removes
         # the mapping later
         return len(mappings) > 1
@@ -275,11 +281,11 @@ class VscsiVolumeAdapter(object):
         """Cleanup the hdisk associated with this udid."""
         if not udid and not devname:
-            LOG.warning(
-                'Could not remove hdisk for volume: %s', self.volume_id)
+            LOG.warning('Could not remove hdisk for volume %s', self.volume_id,
+                        instance=self.instance)
             return
-        LOG.info('Removing hdisk for udid: %s', udid)
+        LOG.info('Removing hdisk for udid: %s', udid, instance=self.instance)
         def find_hdisk_to_remove(vios_w):
             if devname is None:
@@ -288,8 +294,9 @@ class VscsiVolumeAdapter(object):
             device_name = devname
             if device_name is None:
                 return
-            LOG.info('Removing %(hdisk)s from VIOS %(vios)s',
-                     {'hdisk': device_name, 'vios': vios_w.name})
+            LOG.info('Adding deferred task to remove %(hdisk)s from VIOS '
+                     '%(vios)s.', {'hdisk': device_name, 'vios': vios_w.name},
+                     instance=self.instance)
             self._add_remove_hdisk(vios_w, device_name,
                                    stg_ftsk=rmv_hdisk_ftsk)
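
The reworded "Adding deferred task..." messages above reflect that nothing happens at call time: the hdisk removal is queued as a post-execute step on the storage FeedTask and only runs after the SCSI mapping updates. A sketch of that deferral, assuming a pypowervm FeedTask-like object exposing add_post_execute:

from taskflow import task


def defer_rm_hdisk(stg_ftsk, vio_wrap, device_name, rm_hdisk):
    # rm_hdisk is the closure that actually calls hdisk.remove_hdisk();
    # wrapping it in a FunctorTask defers it until the feed task's
    # mapping changes have been flushed to the VIOS.
    name = 'rm_hdisk_%s_%s' % (vio_wrap.uuid, device_name)  # hypothetical
    stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))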

View File

@@ -1,4 +1,4 @@
-# Copyright 2015, 2016 IBM Corp.
+# Copyright 2015, 2017 IBM Corp.
 #
 # All Rights Reserved.
 #
@@ -30,6 +30,8 @@ from pypowervm.utils import transaction as tx
 from pypowervm.wrappers import virtual_io_server as pvm_vios
 import six
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -178,15 +180,16 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
         itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
         if len(itls) == 0:
             LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
-                      {'vios': vios_w.name, 'volume_id': volume_id})
+                      {'vios': vios_w.name, 'volume_id': volume_id},
+                      instance=self.instance)
             return None, None, None
         status, device_name, udid = hdisk.discover_hdisk(self.adapter,
                                                          vios_w.uuid, itls)
         if hdisk.good_discovery(status, device_name):
-            LOG.info('Discovered %(hdisk)s on vios %(vios)s for '
-                     'volume %(volume_id)s. Status code: %(status)s.',
+            LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
+                     '%(volume_id)s. Status code: %(status)s.',
                      {'hdisk': device_name, 'vios': vios_w.name,
                       'volume_id': volume_id, 'status': str(status)},
                      instance=self.instance)
@@ -217,9 +220,10 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
         slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)
         if slot_mgr.is_rebuild and not slot:
-            LOG.debug('Detected a device with UDID %s on VIOS %s on the '
-                      'rebuild that did not exist on the source. '
-                      'Ignoring.', udid, vios_w.uuid)
+            LOG.debug('Detected a device with UDID %(udid)s on VIOS '
+                      '%(vios)s on the rebuild that did not exist on the '
+                      'source. Ignoring.', {'udid': udid, 'vios': vios_w.uuid},
+                      instance=self.instance)
             return False
         if hdisk.good_discovery(status, device_name):
@@ -232,7 +236,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
             # Save the UDID for the disk in the connection info. It is
             # used for the detach.
             self._set_udid(udid)
-            LOG.debug('Device attached: %s', device_name)
+            LOG.debug('Added deferred task to attach device %s', device_name,
+                      instance=self.instance)
             # Valid attachment
             return True
@@ -253,7 +258,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
                  otherwise.
         """
         LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
-                  dict(vol=self.volume_id, uuid=vios_w.uuid))
+                  dict(vol=self.volume_id, uuid=vios_w.uuid),
+                  instance=self.instance)
         udid, device_name = None, None
         try:
             udid = self._get_udid()
@@ -281,12 +287,12 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
                           instance=self.instance)
             return False
-        except Exception as e:
-            LOG.warning(
-                "Disconnect Volume: Failed to find disk on Virtual I/O "
-                "Server %(vios_name)s for volume %(volume_id)s. Volume "
-                "UDID: %(volume_uid)s. Error: %(error)s",
-                {'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
-                 'volume_id': self.volume_id}, instance=self.instance)
+        except Exception:
+            LOG.exception(
+                "Disconnect Volume: Failed to find disk on Virtual I/O "
+                "Server %(vios_name)s for volume %(volume_id)s. Volume "
+                "UDID: %(volume_uid)s.",
+                {'volume_uid': udid, 'vios_name': vios_w.name,
+                 'volume_id': self.volume_id}, instance=self.instance)
             return False
@@ -326,9 +332,7 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
                         for result in ret['wrapper_task_rets'].values()]):
                 LOG.warning("Disconnect Volume: Failed to disconnect the "
-                            "volume %(volume_id)s on ANY of the Virtual "
-                            "I/O Servers for instance %(inst)s.",
-                            {'inst': self.instance.name,
-                             'volume_id': self.volume_id},
+                            "volume %(volume_id)s on ANY of the Virtual "
+                            "I/O Servers.", {'volume_id': self.volume_id},
                             instance=self.instance)
         except Exception as e: