Merge "Clean up log messages"

Jenkins authored 2017-06-28 20:47:57 +00:00; committed by Gerrit Code Review
commit 560953d1c8
27 changed files with 301 additions and 347 deletions
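The pattern applied across all 27 files is visible in the first hunk below: instead of folding instance.name into every format string by hand, log calls now pass the instance through the logging adapter's instance= keyword, and handlers that logged a captured exception object switch to LOG.exception. A minimal before/after sketch, assuming oslo.log's standard adapter (the _do_map helper and map_disk wrapper are illustrative, not part of this change):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def _do_map(instance, disk_name):
        """Hypothetical stand-in for the real mapping call."""
        raise RuntimeError("mapping failed")

    def map_disk(instance, disk_name):
        # Old style: the instance name is interpolated into each message.
        LOG.info("Mapping disk %(disk)s for instance %(inst)s",
                 {'disk': disk_name, 'inst': instance.name})
        # New style: pass instance= and let oslo.log's KeywordArgumentAdapter
        # prefix the message per CONF.instance_format (by default
        # "[instance: %(uuid)s] "), keeping the format string short.
        LOG.info("Mapping disk %(disk)s", {'disk': disk_name},
                 instance=instance)
        try:
            _do_map(instance, disk_name)
        except Exception:
            # LOG.exception logs at ERROR and appends the active traceback,
            # so the exception no longer needs manual interpolation.
            LOG.exception("Failed to map disk %(disk)s",
                          {'disk': disk_name}, instance=instance)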

View File

@ -1,5 +1,5 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -204,12 +204,10 @@ class DiskAdapter(object):
made.
:raise InstanceDiskMappingFailed: If the mapping could not be done.
"""
msg_args = {'instance_name': instance.name}
lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
for stg_elem, vios in self.instance_disk_iter(instance,
lpar_wrap=lpar_wrap):
msg_args['disk_name'] = stg_elem.name
msg_args['vios_name'] = vios.name
msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
# Create a new mapping. NOTE: If there's an existing mapping on
# the other VIOS but not this one, we'll create a second mapping
@ -218,23 +216,21 @@ class DiskAdapter(object):
# alternative would be always checking all VIOSes for existing
# mappings, which increases the response time of the common case by
# an entire GET of VIOS+VIO_SMAP.
LOG.debug("Mapping boot disk %(disk_name)s of instance "
"%(instance_name)s to the management partition from "
"Virtual I/O Server %(vios_name)s.", msg_args)
LOG.debug("Mapping boot disk %(disk_name)s to the management "
"partition from Virtual I/O Server %(vios_name)s.",
msg_args, instance=instance)
try:
tsk_map.add_vscsi_mapping(self.host_uuid, vios, self.mp_uuid,
stg_elem)
# If that worked, we're done. add_vscsi_mapping logged.
return stg_elem, vios
except Exception as e:
msg_args['exc'] = e
LOG.warning("Failed to map boot disk %(disk_name)s of "
"instance %(instance_name)s to the management "
"partition from Virtual I/O Server "
"%(vios_name)s: %(exc)s", msg_args)
except Exception:
LOG.exception("Failed to map boot disk %(disk_name)s to the "
"management partition from Virtual I/O Server "
"%(vios_name)s.", msg_args, instance=instance)
# Try the next hit, if available.
# We either didn't find the boot dev, or failed all attempts to map it.
raise npvmex.InstanceDiskMappingFailed(**msg_args)
raise npvmex.InstanceDiskMappingFailed(instance_name=instance.name)
def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
"""Disconnect a disk from the management partition.

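The changed raise in the hunk above follows from the narrowed msg_args: the dict now carries only disk_name and vios_name, so InstanceDiskMappingFailed is built with an explicit instance_name keyword rather than **msg_args. Nova-style exceptions format a class-level msg_fmt with the keywords passed at construction, so a dict missing a referenced key would raise KeyError (extra keys are harmlessly ignored by %-formatting on a dict). A hedged sketch of that convention; the message text is illustrative, not copied from nova_powervm.virt.powervm.exception:

    class InstanceDiskMappingFailed(Exception):
        # Keyword-formatted message, in the style of nova's NovaException.
        msg_fmt = ("Failed to map boot disk of instance %(instance_name)s "
                   "to the management partition.")

        def __init__(self, **kwargs):
            super(InstanceDiskMappingFailed, self).__init__(
                self.msg_fmt % kwargs)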
View File

@ -1,5 +1,5 @@
# Copyright 2013 OpenStack Foundation
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -155,8 +155,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Make sure the remove function will run within the transaction manager
def rm_func(vios_w):
LOG.info("Disconnecting instance %(inst)s from storage "
"disks.", {'inst': instance.name})
LOG.info("Disconnecting instance from storage disks.",
instance=instance)
return tsk_map.remove_maps(vios_w, lpar_uuid,
match_func=match_func)
@ -269,9 +269,8 @@ class LocalStorage(disk_dvr.DiskAdapter):
self.adapter, name='localdisk', xag=[pvm_const.XAG.VIO_SMAP])
def add_func(vios_w):
LOG.info("Adding logical volume disk connection between VM "
"%(vm)s and VIOS %(vios)s.",
{'vm': instance.name, 'vios': vios_w.name})
LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
{'vios': vios_w.name}, instance=instance)
mapping = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, lpar_uuid, disk_info)
return tsk_map.add_map(vios_w, mapping)
@ -282,15 +281,16 @@ class LocalStorage(disk_dvr.DiskAdapter):
if stg_ftsk.name == 'localdisk':
stg_ftsk.execute()
def _validate_resizable(self, vdisk):
@staticmethod
def _validate_resizable(vdisk):
"""Validates that VDisk supports resizing
:param vdisk: The VDisk to be resized
:raise ResizeError: If resizing is not supported for the given VDisk.
"""
if (vdisk.backstore_type == pvm_stg.BackStoreType.USER_QCOW):
if vdisk.backstore_type == pvm_stg.BackStoreType.USER_QCOW:
raise nova_exc.ResizeError(
reason=_("Resizing file-backed instances is not currently"
reason=_("Resizing file-backed instances is not currently "
"supported."))
def extend_disk(self, instance, disk_info, size):
@ -327,7 +327,7 @@ class LocalStorage(disk_dvr.DiskAdapter):
# Get the disk name based on the instance and type
vol_name = self._get_disk_name(disk_info['type'], instance, short=True)
LOG.info('Extending disk: %s', vol_name)
LOG.info('Extending disk: %s', vol_name, instance=instance)
try:
_extend()
except pvm_exc.Error:

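Two hunks in this change (_validate_resizable above, and _get_connectivity_type in PowerVMDriver later) convert helpers that never touch self into static methods. A minimal sketch of the conversion, using illustrative names and values:

    class Sketch(object):
        @staticmethod
        def _validate_resizable(vdisk):
            # No self access, so the method can be static; existing callers
            # via self._validate_resizable(vdisk) are unchanged.
            if vdisk.backstore_type == 'USER_QCOW':  # illustrative constant
                raise ValueError("Resizing file-backed instances is not "
                                 "currently supported.")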
View File

@ -69,9 +69,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
self.ssp_name = self._ssp.name
self.tier_name = self._tier.name
LOG.info("SSP Storage driver initialized. "
"Cluster '%(clust_name)s'; SSP '%(ssp_name)s'; "
"Tier '%(tier_name)s",
LOG.info("SSP Storage driver initialized. Cluster '%(clust_name)s'; "
"SSP '%(ssp_name)s'; Tier '%(tier_name)s",
{'clust_name': self.clust_name, 'ssp_name': self.ssp_name,
'tier_name': self.tier_name})
@ -140,9 +139,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
# Delay run function to remove the mapping between the VM and the LU
def rm_func(vios_w):
LOG.info("Removing SSP disk connection between VM %(vm)s and "
"VIOS %(vios)s.",
{'vm': instance.name, 'vios': vios_w.name})
LOG.info("Removing SSP disk connection to VIOS %(vios)s.",
{'vios': vios_w.name}, instance=instance)
return tsk_map.remove_maps(vios_w, lpar_uuid,
match_func=match_func)
@ -200,7 +198,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
def _create_disk_from_image(self, context, instance, image_meta,
image_type=disk_drv.DiskType.BOOT):
"""Creates a boot disk and links the specified image to it.
"""Creates a disk and copies the specified image to it.
If the specified image has not already been uploaded, an Image LU is
created for it. A Disk LU is then created for the instance and linked
@ -213,10 +211,9 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
:param image_type: The image type. See disk_drv.DiskType.
:return: The backing pypowervm LU storage object that was created.
"""
LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s '
'for instance %(instance_uuid)s.',
dict(image_type=image_type, image_id=image_meta.id,
instance_uuid=instance.uuid))
LOG.info('SSP: Create %(image_type)s disk from image %(image_id)s.',
dict(image_type=image_type, image_id=image_meta.id),
instance=instance)
image_lu = tsk_cs.get_or_upload_image_lu(
self._tier, self._get_image_name(image_meta),
@ -225,7 +222,7 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
image_meta.size, upload_type=tsk_stg.UploadType.IO_STREAM)
boot_lu_name = self._get_disk_name(image_type, instance)
LOG.info('SSP: Disk name is %s', boot_lu_name)
LOG.info('SSP: Disk name is %s', boot_lu_name, instance=instance)
return tsk_stg.crt_lu(
self._tier, boot_lu_name, instance.flavor.root_gb,
@ -262,9 +259,8 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
# This is the delay apply mapping
def add_func(vios_w):
LOG.info("Adding SSP disk connection between VM %(vm)s and "
"VIOS %(vios)s.",
{'vm': instance.name, 'vios': vios_w.name})
LOG.info("Adding SSP disk connection to VIOS %(vios)s.",
{'vios': vios_w.name}, instance=instance)
mapping = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, lpar_uuid, lu)
return tsk_map.add_map(vios_w, mapping)

View File

@ -219,8 +219,7 @@ class PowerVMDriver(driver.ComputeDriver):
LOG.info('Operation: %(op)s. Virtual machine display name: '
'%(display_name)s, name: %(name)s',
{'op': op, 'display_name': instance.display_name,
'name': instance.name},
instance=instance)
'name': instance.name}, instance=instance)
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
@ -627,7 +626,7 @@ class PowerVMDriver(driver.ComputeDriver):
"%(lpar_uuid)s from VIOS %(vios_name)s.",
{'num_maps': len(removals),
'lpar_uuid': pvm_inst_uuid,
'vios_name': vwrap.name})
'vios_name': vwrap.name}, instance=instance)
return removals
stg_ftsk.add_functor_subtask(_rm_vscsi_maps)
@ -852,12 +851,12 @@ class PowerVMDriver(driver.ComputeDriver):
# Power Off the LPAR
flow.add(tf_vm.PowerOff(self.adapter, instance))
# Creates the boot image.
# Create and populate the rescue disk.
flow.add(tf_stg.CreateDiskForImg(
self.disk_dvr, context, instance, image_meta,
image_type=disk_dvr.DiskType.RESCUE))
# Connects up the disk to the LPAR
# Connect up the disk to the LPAR
flow.add(tf_stg.ConnectDisk(self.disk_dvr, instance))
# Last step is to power on the system.
@ -1774,8 +1773,8 @@ class PowerVMDriver(driver.ComputeDriver):
vol_cls = vol_attach.get_volume_class(driver_type)
xags.update(set(vol_cls.min_xags()))
LOG.debug('Instance XAGs for VM %(inst)s is %(xags)s.',
{'inst': instance.name, 'xags': ','.join(xags)})
LOG.debug('Instance XAGs: %(xags)s.', {'xags': ','.join(xags)},
instance=instance)
return list(xags)
def _get_boot_connectivity_type(self, context, bdms, block_device_info):
@ -1798,7 +1797,8 @@ class PowerVMDriver(driver.ComputeDriver):
else:
return boot_conn_type
def _get_connectivity_type(self, bdm):
@staticmethod
def _get_connectivity_type(bdm):
conn_info = bdm.get('connection_info')
if 'connection-type' in conn_info['data']:
connectivity_type = conn_info['data']['connection-type']

View File

@ -1,4 +1,4 @@
# Copyright 2014, 2016 IBM Corp.
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -99,7 +99,8 @@ class PowerVMNovaEventHandler(pvm_apt.WrapperEventHandler):
if inst is None:
return None
LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid)
LOG.debug('Handle NVRAM event for PowerVM LPAR %s', pvm_uuid,
instance=inst)
self._driver.nvram_mgr.store(inst)
# If the state of the vm changed see if it should be handled
@ -174,8 +175,8 @@ class PowerVMLifecycleEventHandler(object):
pvm_state = vm.get_vm_qp(self._driver.adapter, pvm_uuid,
'PartitionState')
except exception.InstanceNotFound:
LOG.debug("Instance for LPAR %s was deleted while event was "
"delayed.", pvm_uuid)
LOG.debug("LPAR %s was deleted while event was delayed.", pvm_uuid,
instance=inst)
return
LOG.debug('New state %s for partition %s', pvm_state, pvm_uuid,

View File

@ -1,4 +1,4 @@
# Copyright 2014, 2015 IBM Corp.
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#

View File

@ -1,4 +1,4 @@
# Copyright 2015 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#

View File

@ -35,6 +35,7 @@ from nova_powervm.virt.powervm import media
from nova_powervm.virt.powervm import vif
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -142,9 +143,8 @@ class LiveMigrationDest(LiveMigration):
:param migrate_data: a PowerVMLiveMigrateData object
:param vol_drvs: volume drivers for the attached volumes
"""
LOG.debug('Running pre live migration on destination.',
instance=self.instance)
LOG.debug('Migration data: %s', migrate_data, instance=self.instance)
LOG.debug('Running pre live migration on destination. Migration data: '
'%s', migrate_data, instance=self.instance)
# Set the ssh auth key.
mgmt_task.add_authorized_key(self.drvr.adapter,
@ -204,7 +204,7 @@ class LiveMigrationDest(LiveMigration):
try:
vol_drv.post_live_migration_at_destination(mig_vol_stor)
except Exception:
LOG.exception("PowerVM error cleaning up destincation host "
LOG.exception("PowerVM error cleaning up destination host "
"after migration.", instance=self.instance)
# It failed.
raise LiveMigrationVolume(
@ -270,7 +270,8 @@ class LiveMigrationSrc(LiveMigration):
lpar_w = vm.get_instance_wrapper(self.drvr.adapter, self.instance)
self.lpar_w = lpar_w
LOG.debug('Dest Migration data: %s', self.mig_data)
LOG.debug('Dest Migration data: %s', self.mig_data,
instance=self.instance)
# Check proc compatibility modes
if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in
@ -307,7 +308,7 @@ class LiveMigrationSrc(LiveMigration):
vol_drv.pre_live_migration_on_source(vol_data)
self.mig_data.vol_data = vol_data
LOG.debug('Src Migration data: %s', self.mig_data,
LOG.debug('Source migration data: %s', self.mig_data,
instance=self.instance)
# Create a FeedTask to scrub any orphaned mappings/storage associated
@ -333,8 +334,8 @@ class LiveMigrationSrc(LiveMigration):
:param context: security context
:param migrate_data: a PowerVMLiveMigrateData object
"""
LOG.debug("Starting migration.", instance=self.instance)
LOG.debug("Migrate data: %s", migrate_data, instance=self.instance)
LOG.debug("Starting migration. Migrate data: %s", migrate_data,
instance=self.instance)
# The passed in mig data has more info (dest data added), so replace
self.mig_data = migrate_data
@ -376,8 +377,7 @@ class LiveMigrationSrc(LiveMigration):
trunk_to_del.delete()
except Exception:
with excutils.save_and_reraise_exception(logger=LOG):
LOG.exception("Live migration failed.",
instance=self.instance)
LOG.exception("Live migration failed.", instance=self.instance)
finally:
LOG.debug("Finished migration.", instance=self.instance)
@ -439,9 +439,8 @@ class LiveMigrationSrc(LiveMigration):
if self.lpar_w.migration_state != 'Not_Migrating':
self.migration_recover()
except Exception as ex:
LOG.error("Migration recover failed with error: %s", ex,
instance=self.instance)
except Exception:
LOG.exception("Migration rollback failed.", instance=self.instance)
finally:
LOG.debug("Finished migration rollback.", instance=self.instance)

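The rollback path in this file relies on oslo.utils' save_and_reraise_exception context manager, which re-raises the active exception when the block exits, so the LOG.exception call cannot swallow it. A short sketch of the idiom, with a hypothetical _do_live_migration standing in for the real flow:

    from oslo_log import log as logging
    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def _do_live_migration(instance):
        """Hypothetical stand-in for the real migration flow."""
        raise RuntimeError("migration failed")

    def migrate(instance):
        try:
            _do_live_migration(instance)
        except Exception:
            # Log with traceback; the context manager re-raises the
            # original exception when the with-block exits.
            with excutils.save_and_reraise_exception(logger=LOG):
                LOG.exception("Live migration failed.", instance=instance)
        finally:
            LOG.debug("Finished migration.", instance=instance)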
View File

@ -37,6 +37,7 @@ from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -97,8 +98,7 @@ class ConfigDrivePowerVM(object):
:return iso_path: The path to the ISO
:return file_name: The file name for the ISO
"""
LOG.info("Creating config drive for instance: %s", instance.name,
instance=instance)
LOG.info("Creating config drive.", instance=instance)
extra_md = {}
if admin_pass is not None:
extra_md['admin_pass'] = admin_pass
@ -121,10 +121,8 @@ class ConfigDrivePowerVM(object):
max_len=pvm_const.MaxLen.VOPT_NAME)
iso_path = os.path.join(im_path, file_name)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
LOG.info("Config drive ISO being built for instance %(inst)s "
"building to path %(iso_path)s.",
{'inst': instance.name, 'iso_path': iso_path},
instance=instance)
LOG.info("Config drive ISO building to path %(iso_path)s.",
{'iso_path': iso_path}, instance=instance)
# If there's an OSError-related failure while creating the
# config drive, retry the make-drive operation.
@ -210,10 +208,8 @@ class ConfigDrivePowerVM(object):
# Define the function to build and add the mapping
def add_func(vios_w):
LOG.info("Adding cfg drive mapping for instance %(inst)s for "
"Virtual I/O Server %(vios)s",
{'inst': instance.name, 'vios': vios_w.name},
instance=instance)
LOG.info("Adding config drive mapping to Virtual I/O Server "
"%(vios)s", {'vios': vios_w.name}, instance=instance)
mapping = tsk_map.build_vscsi_mapping(self.host_uuid, vios_w,
lpar_uuid, vopt)
return tsk_map.add_map(vios_w, mapping)

View File

@ -1,4 +1,4 @@
# Copyright 2015 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -34,6 +34,7 @@ import retrying
from nova_powervm.virt.powervm import exception as npvmex
LOG = logging.getLogger(__name__)
_MP_UUID = None

View File

@ -1,4 +1,4 @@
# Copyright 2016 IBM Corp.
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -119,10 +119,9 @@ class NvramManager(object):
# Remove it from the store
try:
self._api.delete(instance)
except Exception as e:
except Exception:
# Delete exceptions should not end the operation
LOG.warning('Could not delete NVRAM: %s', e,
instance=instance)
LOG.exception('Could not delete NVRAM.', instance=instance)
@lockutils.synchronized(LOCK_NVRAM_UPDT_LIST)
def _add_to_list(self, instance):
@ -166,7 +165,7 @@ class NvramManager(object):
self._pop_from_list(uuid=instance.uuid)
try:
LOG.debug('Updating NVRAM for instance: %s', instance.uuid)
LOG.debug('Updating NVRAM.', instance=instance)
data = vm.get_instance_wrapper(
self._adapter, instance, xag=[pvm_const.XAG.NVRAM]).nvram
LOG.debug('NVRAM for instance: %s', data, instance=instance)

View File

@ -1,4 +1,4 @@
# Copyright 2016 IBM Corp.
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -33,6 +33,7 @@ from oslo_utils import excutils
from swiftclient import exceptions as swft_exc
from swiftclient import service as swft_srv
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -126,7 +127,8 @@ class SwiftNvramStore(api.NvramStore):
def _store(self, inst_key, inst_name, data, exists=None):
"""Store the NVRAM into the storage service.
:param instance: instance object
:param inst_key: The key by which to store the data in the repository.
:param inst_name: The name of the instance.
:param data: the NVRAM data base64 encoded string
:param exists: (Optional, Default: None) If specified, tells the upload
whether or not the object exists. Should be a boolean
@ -216,12 +218,11 @@ class SwiftNvramStore(api.NvramStore):
data = data.encode('ascii')
md5 = hashlib.md5(data).hexdigest()
if existing_hash == md5:
LOG.info('NVRAM has not changed for instance: %s',
instance.name, instance=instance)
LOG.info('NVRAM has not changed.', instance=instance)
return
self._store(instance.uuid, instance.name, data, exists=exists)
LOG.debug('NVRAM updated for instance: %s', instance.name)
LOG.debug('NVRAM updated', instance=instance)
def store_slot_map(self, inst_key, data):
"""Store the Slot Map to Swift.

View File

@ -1,4 +1,4 @@
# Copyright 2016 IBM Corp.
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -24,6 +24,7 @@ from pypowervm.tasks import storage as pvm_tstor
from nova_powervm.virt.powervm import exception as p_exc
LOG = logging.getLogger(__name__)
_SLOT_KEY = "CLIENT_SLOT_DATA"
@ -177,12 +178,10 @@ class SwiftSlotManager(NovaSlotManager):
self.store_api.delete_slot_map(key)
except Exception:
LOG.warning("Unable to delete the slot map from Swift backing "
"store with ID %(key)s. Will require "
"manual cleanup.", {'key': key},
instance=self.instance)
"store with ID %(key)s. Will require manual cleanup.",
{'key': key}, instance=self.instance)
class NoopSlotManager(NovaSlotManager):
"""No op Slot Map (for when Swift is not used - which is standard)."""
pass

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -19,6 +19,7 @@ from taskflow import task
from nova_powervm.virt.powervm import image
LOG = logging.getLogger(__name__)
@ -73,10 +74,9 @@ class StreamToGlance(task.Task):
def execute(self, disk_path):
metadata = image.snapshot_metadata(self.context, self.image_api,
self.image_id, self.instance)
LOG.info("Starting stream of boot device for instance %(inst)s "
"(local blockdev %(devpath)s) to glance image "
"%(img_id)s.",
{'inst': self.instance.name, 'devpath': disk_path,
'img_id': self.image_id}, instance=self.instance)
LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
"to glance image %(img_id)s.",
{'devpath': disk_path, 'img_id': self.image_id},
instance=self.instance)
image.stream_blockdev_to_glance(self.context, self.image_api,
self.image_id, metadata, disk_path)

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -27,6 +27,7 @@ from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import vif
from nova_powervm.virt.powervm import vm
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -60,11 +61,9 @@ class UnplugVifs(task.Task):
# error up front.
modifiable, reason = lpar_wrap.can_modify_io()
if not modifiable:
LOG.error('Unable to remove VIFs from instance %(inst)s '
'because the system is not in a correct state. '
'The reason reported by the system is: %(reason)s',
{'inst': self.instance.name, 'reason': reason},
instance=self.instance)
LOG.error("Unable to remove VIFs in the instance's current state. "
"The reason reported by the system is: %(reason)s",
{'reason': reason}, instance=self.instance)
raise exception.VirtualInterfaceUnplugException(reason=reason)
# Get all the current Client Network Adapters (CNA) on the VM itself.
@ -149,12 +148,9 @@ class PlugVifs(task.Task):
# Check to see if the LPAR is OK to add VIFs to.
modifiable, reason = lpar_wrap.can_modify_io()
if not modifiable and self.crt_network_infos:
LOG.error('Unable to create VIF(s) for instance %(sys)s. The '
'VM was in a state where VIF plugging is not '
'acceptable. The reason from the system is: '
'%(reason)s',
{'sys': self.instance.name, 'reason': reason},
instance=self.instance)
LOG.error("Unable to create VIF(s) in the instance's current "
"state. The reason reported by the system is: "
"%(reason)s", {'reason': reason}, instance=self.instance)
raise exception.VirtualInterfaceCreateException()
# TODO(KYLEH): We're setting up to wait for an instance event. The
@ -177,9 +173,8 @@ class PlugVifs(task.Task):
# not wait for the neutron event as that likely won't be sent (it was
# already done).
for network_info in self.update_network_infos:
LOG.info("Updating VIF with mac %(mac)s for instance %(sys)s",
{'mac': network_info['address'],
'sys': self.instance.name}, instance=self.instance)
LOG.info("Updating VIF with mac %(mac)s",
{'mac': network_info['address']}, instance=self.instance)
vif.plug(self.adapter, self.host_uuid, self.instance,
network_info, self.slot_mgr, new_vif=False)
@ -190,10 +185,8 @@ class PlugVifs(task.Task):
deadline=CONF.vif_plugging_timeout,
error_callback=self._vif_callback_failed):
for network_info in self.crt_network_infos:
LOG.info('Creating VIF with mac %(mac)s for instance '
'%(sys)s',
{'mac': network_info['address'],
'sys': self.instance.name},
LOG.info('Creating VIF with mac %(mac)s.',
{'mac': network_info['address']},
instance=self.instance)
new_vif = vif.plug(
self.adapter, self.host_uuid, self.instance,
@ -202,8 +195,7 @@ class PlugVifs(task.Task):
pvm_net.CNA):
self.cnas.append(new_vif)
except eventlet.timeout.Timeout:
LOG.error('Error waiting for VIF to be created for instance '
'%(sys)s', {'sys': self.instance.name},
LOG.error('Error waiting for VIF to be created.',
instance=self.instance)
raise exception.VirtualInterfaceCreateException()
finally:
@ -216,9 +208,8 @@ class PlugVifs(task.Task):
return self.cnas
def _vif_callback_failed(self, event_name, instance):
LOG.error('VIF Plug failure for callback on event '
'%(event)s for instance %(uuid)s',
{'event': event_name, 'uuid': instance.uuid})
LOG.error('VIF plug failure for callback on event %(event)s.',
{'event': event_name}, instance=instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
@ -247,8 +238,8 @@ class PlugVifs(task.Task):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning('VIF creation being rolled back for instance %(inst)s',
{'inst': self.instance.name}, instance=self.instance)
LOG.warning('VIF creation is being rolled back.',
instance=self.instance)
# Get the current adapters on the system
cna_w_list = vm.get_cnas(self.adapter, self.instance)
@ -293,20 +284,19 @@ class PlugMgmtVif(task.Task):
# return None because the Config Drive step (which may be used...may
# not be) required the mgmt vif.
if not CONF.powervm.use_rmc_mgmt_vif:
LOG.debug('No management VIF created for instance %s as the conf '
'option use_rmc_mgmt_vif is set to False',
self.instance.name)
LOG.debug('No management VIF created because '
'CONF.powervm.use_rmc_mgmt_vif is False',
instance=self.instance)
return None
LOG.info('Plugging the Management Network Interface to instance %s',
self.instance.name, instance=self.instance)
LOG.info('Plugging the management network interface.',
instance=self.instance)
# Determine if we need to create the secure RMC VIF. This should only
# be needed if there is not a VIF on the secure RMC vSwitch
vswitch = vif.get_secure_rmc_vswitch(self.adapter, self.host_uuid)
if vswitch is None:
LOG.warning('No management VIF created for instance %s due to '
'lack of Management Virtual Switch',
self.instance.name)
LOG.warning('No management VIF created due to lack of management '
'virtual switch', instance=self.instance)
return None
# This next check verifies that there are no existing NICs on the
@ -318,8 +308,7 @@ class PlugMgmtVif(task.Task):
has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas]
if has_mgmt_vif:
LOG.debug('Management VIF already created for instance %s',
self.instance.name)
LOG.debug('Management VIF already exists.', instance=self.instance)
return None
# Return the created management CNA

View File

@ -1,4 +1,4 @@
# Copyright 2016 IBM Corp.
# Copyright 2016, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -18,6 +18,7 @@
from oslo_log import log as logging
from taskflow import task
LOG = logging.getLogger(__name__)
@ -42,8 +43,8 @@ class SaveSlotStore(task.Task):
super(SaveSlotStore, self).__init__('save_slot_store')
def execute(self):
LOG.debug("Topology for instance %(inst)s: %(topo)s",
{'inst': self.instance.name, 'topo': self.slot_mgr.topology})
LOG.debug("Topology: %(topo)s", {'topo': self.slot_mgr.topology},
instance=self.instance)
self.slot_mgr.save()

View File

@ -1,4 +1,4 @@
# Copyright 2015 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import six
from pypowervm.tasks import scsi_mapper as pvm_smap
from oslo_log import log as logging
@ -27,6 +25,7 @@ from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm import media
from nova_powervm.virt.powervm import mgmt
LOG = logging.getLogger(__name__)
@ -49,15 +48,15 @@ class ConnectVolume(task.Task):
super(ConnectVolume, self).__init__('connect_vol_%s' % self.vol_id)
def execute(self):
LOG.info('Connecting volume %(vol)s to instance %(inst)s',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
LOG.info('Connecting volume %(vol)s.', {'vol': self.vol_id},
instance=self.vol_drv.instance)
self.vol_drv.connect_volume(self.slot_mgr)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning('Volume %(vol)s for instance %(inst)s to be disconnected',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
LOG.warning('Rolling back connection for volume %(vol)s.',
{'vol': self.vol_id}, instance=self.vol_drv.instance)
# Note that the rollback is *instant*. Resetting the FeedTask ensures
# immediate rollback.
@ -68,13 +67,11 @@ class ConnectVolume(task.Task):
# was connected. This attempts to clear anything out to make sure
# the terminate connection runs smoothly.
self.vol_drv.disconnect_volume(self.slot_mgr)
except npvmex.VolumeDetachFailed as e:
except npvmex.VolumeDetachFailed:
# Only log that the volume detach failed. Should not be blocking
# due to being in the revert flow.
LOG.warning("Unable to disconnect volume for %(inst)s during "
"rollback. Error was: %(error)s",
{'inst': self.vol_drv.instance.name,
'error': e.message})
LOG.exception("Unable to disconnect volume %s during rollback.",
self.vol_id, instance=self.vol_drv.instance)
class DisconnectVolume(task.Task):
@ -97,15 +94,15 @@ class DisconnectVolume(task.Task):
'disconnect_vol_%s' % self.vol_id)
def execute(self):
LOG.info('Disconnecting volume %(vol)s from instance %(inst)s',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
LOG.info('Disconnecting volume %(vol)s.',
{'vol': self.vol_id}, instance=self.vol_drv.instance)
self.vol_drv.disconnect_volume(self.slot_mgr)
def revert(self, result, flow_failures):
# The parameters have to match the execute method, plus the response +
# failures even if only a subset are used.
LOG.warning('Volume %(vol)s for instance %(inst)s to be re-connected',
{'vol': self.vol_id, 'inst': self.vol_drv.instance.name})
LOG.warning('Reconnecting volume %(vol)s on disconnect rollback.',
{'vol': self.vol_id}, instance=self.vol_drv.instance)
# Note that the rollback is *instant*. Resetting the FeedTask ensures
# immediate rollback.
@ -117,13 +114,11 @@ class DisconnectVolume(task.Task):
# in error scenarios. This is simply useful for debug purposes
# if there is an operational error.
self.vol_drv.connect_volume(self.slot_mgr)
except npvmex.VolumeAttachFailed as e:
except npvmex.VolumeAttachFailed:
# Only log that the volume attach failed. Should not be blocking
# due to being in the revert flow. See comment above.
LOG.warning("Unable to re-connect volume for %(inst)s during "
"rollback. Error was: %(error)s",
{'inst': self.vol_drv.instance.name,
'error': e.message})
LOG.exception("Unable to reconnect volume %s during rollback.",
self.vol_id, instance=self.vol_drv.instance)
class CreateDiskForImg(task.Task):
@ -260,7 +255,7 @@ class InstanceDiskToMgmt(task.Task):
# Scan the SCSI bus, discover the disk, find its canonical path.
LOG.info("Discovering device and path for mapping of %(dev_name)s "
"on the management partition.",
{'dev_name': self.stg_elem.name})
{'dev_name': self.stg_elem.name}, instance=self.instance)
self.disk_path = mgmt.discover_vscsi_disk(the_map)
return self.stg_elem, self.vios_wrap, self.disk_path
@ -273,20 +268,18 @@ class InstanceDiskToMgmt(task.Task):
if self.vios_wrap is None or self.stg_elem is None:
# We never even got connected - nothing to do
return
LOG.warning("Unmapping boot disk %(disk_name)s of instance "
"%(instance_name)s from management partition via "
"Virtual I/O Server %(vios_name)s.",
LOG.warning("Unmapping boot disk %(disk_name)s from the management "
"partition via Virtual I/O Server %(vioname)s.",
{'disk_name': self.stg_elem.name,
'instance_name': self.instance.name,
'vios_name': self.vios_wrap.name})
'vioname': self.vios_wrap.name}, instance=self.instance)
self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
self.stg_elem.name)
if self.disk_path is None:
# We did not discover the disk - nothing else to do.
return
LOG.warning("Removing disk %(disk_path)s from the management "
"partition.", {'disk_path': self.disk_path})
LOG.warning("Removing disk %(dpath)s from the management partition.",
{'dpath': self.disk_path}, instance=self.instance)
mgmt.remove_block_dev(self.disk_path)
@ -330,15 +323,13 @@ class RemoveInstanceDiskFromMgmt(task.Task):
# stg_elem is None if boot disk was not mapped to management partition
if stg_elem is None:
return
LOG.info("Unmapping boot disk %(disk_name)s of instance "
"%(instance_name)s from management partition via Virtual "
"I/O Server %(vios_name)s.",
{'disk_name': stg_elem.name,
'instance_name': self.instance.name,
'vios_name': vios_wrap.name})
LOG.info("Unmapping boot disk %(disk_name)s from the management "
"partition via Virtual I/O Server %(vios_name)s.",
{'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
instance=self.instance)
self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
LOG.info("Removing disk %(disk_path)s from the management "
"partition.", {'disk_path': disk_path})
LOG.info("Removing disk %(disk_path)s from the management partition.",
{'disk_path': disk_path}, instance=self.instance)
mgmt.remove_block_dev(disk_path)
@ -397,10 +388,9 @@ class CreateAndConnectCfgDrive(task.Task):
# Delete the virtual optical media. If it fails we don't care.
try:
self.mb.dlt_vopt(lpar_wrap.uuid)
except Exception as e:
LOG.warning('Vopt removal as part of spawn reversion failed '
'with: %(exc)s', {'exc': six.text_type(e)},
instance=self.instance)
except Exception:
LOG.exception('VOpt removal (as part of reversion) failed.',
instance=self.instance)
class DeleteVOpt(task.Task):
@ -501,9 +491,8 @@ class SaveBDM(task.Task):
super(SaveBDM, self).__init__('save_bdm_%s' % self.bdm.volume_id)
def execute(self):
LOG.info('Saving block device mapping for volume id %(vol_id)s '
'on instance %(inst)s.',
{'vol_id': self.bdm.volume_id, 'inst': self.instance.name})
LOG.info('Saving block device mapping for volume id %(vol_id)s.',
{'vol_id': self.bdm.volume_id}, instance=self.instance)
self.bdm.save()
@ -558,6 +547,7 @@ class ExtendDisk(task.Task):
super(ExtendDisk, self).__init__('extend_disk_%s' % disk_info['type'])
def execute(self):
LOG.info('Extending disk size of disk: %(disk)s size: %(size)s.',
{'disk': self.disk_info['type'], 'size': self.size})
LOG.info('Extending %(disk_type)s disk to %(size)s GB.',
{'disk_type': self.disk_info['type'], 'size': self.size},
instance=self.instance)
self.disk_dvr.extend_disk(self.instance, self.disk_info, self.size)

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -98,8 +98,7 @@ class Create(task.Task):
def execute(self):
data = None
if self.nvram_mgr is not None:
LOG.info('Fetching NVRAM for instance %s.',
self.instance.name, instance=self.instance)
LOG.info('Fetching NVRAM.', instance=self.instance)
data = self.nvram_mgr.fetch(self.instance)
LOG.debug('NVRAM data is: %s', data, instance=self.instance)
@ -114,8 +113,7 @@ class Create(task.Task):
# build map earlier in the spawn, just before the LPAR is created.
# Only rebuilds should be passing in None for stg_ftsk.
if self.stg_ftsk.name == 'create_scrubber':
LOG.info('Scrubbing storage for instance %s as part of '
'rebuild.', self.instance.name,
LOG.info('Scrubbing storage as part of rebuild.',
instance=self.instance)
self.stg_ftsk.execute()
@ -202,11 +200,12 @@ class PowerOn(task.Task):
vm.power_on(self.adapter, self.instance, opts=self.pwr_opts)
def revert(self, result, flow_failures):
LOG.warning('Powering off instance: %s', self.instance.name)
LOG.warning('Rolling back power-on.', instance=self.instance)
if isinstance(result, task_fail.Failure):
# The power on itself failed...can't power off.
LOG.debug('Power on failed. Not performing power off.')
LOG.debug('Power on failed. Not performing power off.',
instance=self.instance)
return
vm.power_off(self.adapter, self.instance, force_immediate=True)
@ -220,7 +219,6 @@ class PowerOff(task.Task):
"""Creates the Task to power off an LPAR.
:param adapter: The adapter for the pypowervm API
:param lpar_uuid: The UUID of the lpar that has media.
:param instance: The nova instance.
:param force_immediate: Boolean. Perform a VSP hard power off.
"""
@ -257,8 +255,7 @@ class StoreNvram(task.Task):
try:
self.nvram_mgr.store(self.instance, immediate=self.immediate)
except Exception:
LOG.exception('Unable to store NVRAM for instance %(name)s.',
{'name': self.instance.name}, instance=self.instance)
LOG.exception('Unable to store NVRAM.', instance=self.instance)
class DeleteNvram(task.Task):
@ -280,13 +277,11 @@ class DeleteNvram(task.Task):
LOG.info("No op for NVRAM delete.", instance=self.instance)
return
LOG.info('Deleting NVRAM for instance: %s',
self.instance.name, instance=self.instance)
LOG.info('Deleting NVRAM', instance=self.instance)
try:
self.nvram_mgr.remove(self.instance)
except Exception:
LOG.exception('Unable to delete NVRAM for instance %(name)s.',
{'name': self.instance.name}, instance=self.instance)
LOG.exception('Unable to delete NVRAM.', instance=self.instance)
class Delete(task.Task):
@ -297,7 +292,6 @@ class Delete(task.Task):
"""Create the Task to delete the VM from the system.
:param adapter: The adapter for the pypowervm API.
:param lpar_uuid: The VM's PowerVM UUID.
:param instance: The nova instance.
"""
super(Delete, self).__init__('dlt_vm')

View File

@ -42,6 +42,7 @@ from pypowervm.wrappers import network as pvm_net
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm import vm
LOG = log.getLogger(__name__)
SECURE_RMC_VSWITCH = 'MGMTSWITCH'
@ -368,25 +369,18 @@ class PvmVifDriver(object):
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was '
'not found on the instance.',
{'mac': vif['address']}, instance=self.instance)
return None
LOG.info('Deleting VIF with mac %(mac)s for instance %(inst)s.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
LOG.info('Deleting VIF with mac %(mac)s.',
{'mac': vif['address']}, instance=self.instance)
try:
cna_w.delete()
except Exception as e:
LOG.error('Unable to unplug VIF with mac %(mac)s for instance '
'%(inst)s.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
LOG.exception("PowerVM error during vif unplug.",
instance=self.instance)
LOG.exception('Unable to unplug VIF with mac %(mac)s.',
{'mac': vif['address']}, instance=self.instance)
raise exception.VirtualInterfaceUnplugException(
reason=six.text_type(e))
return cna_w
@ -485,7 +479,8 @@ class PvmSeaVifDriver(PvmVifDriver):
if not vlan:
vlan = int(vif['details']['vlan'])
LOG.debug("Creating SEA based VIF with VLAN %s", str(vlan))
LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan),
instance=self.instance)
cna_w = pvm_cna.crt_cna(self.adapter, self.host_uuid, lpar_uuid, vlan,
mac_addr=vif['address'], slot_num=slot_num)
@ -600,11 +595,9 @@ class PvmLBVifDriver(PvmLioVifDriver):
# Find the CNA for this vif.
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was '
'not found on the instance.',
{'mac': vif['address']}, instance=self.instance)
return None
# Find and delete the trunk adapters
@ -615,12 +608,11 @@ class PvmLBVifDriver(PvmLioVifDriver):
try:
utils.execute('brctl', 'delif', vif['network']['bridge'],
dev_name, run_as_root=True)
except Exception as e:
LOG.warning('Unable to delete device %(dev_name)s from bridge '
'%(bridge)s. Error: %(error)s',
{'dev_name': dev_name,
'bridge': vif['network']['bridge'],
'error': e.message}, instance=self.instance)
except Exception:
LOG.exception(
'Unable to delete device %(dev_name)s from bridge %(bridge)s.',
{'dev_name': dev_name, 'bridge': vif['network']['bridge']},
instance=self.instance)
for trunk in trunks:
trunk.delete()
@ -648,10 +640,8 @@ class PvmVnicSriovVifDriver(PvmVifDriver):
network = napi.get(admin_context, net_id)
physnet = network.physical_network
LOG.debug("Plugging vNIC SR-IOV vif for physical network "
"'%(physnet)s' into instance %(inst)s.",
{'physnet': physnet, 'inst': self.instance.name},
instance=self.instance)
LOG.debug("Plugging vNIC SR-IOV vif for physical network %(physnet)s.",
{'physnet': physnet}, instance=self.instance)
# Get the msys
msys = pvm_ms.System.get(self.adapter)[0]
@ -697,11 +687,9 @@ class PvmVnicSriovVifDriver(PvmVifDriver):
vnic = vm.get_vnics(
self.adapter, self.instance, mac=mac, one_result=True)
if not vnic:
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. No matching vNIC was found '
'on the instance. VIF: %(vif)s',
{'mac': mac, 'inst': self.instance.name, 'vif': vif},
instance=self.instance)
LOG.warning('Unable to unplug VIF with mac %(mac)s. No matching '
'vNIC was found on the instance. VIF: %(vif)s',
{'mac': mac, 'vif': vif}, instance=self.instance)
return None
vnic.delete()
return vnic
@ -770,11 +758,9 @@ class PvmOvsVifDriver(PvmLioVifDriver):
# Find the CNA for this vif.
cna_w = self._find_cna_for_vif(cna_w_list, vif)
if not cna_w:
LOG.warning('Unable to unplug VIF with mac %(mac)s for '
'instance %(inst)s. The VIF was not found on '
'the instance.',
{'mac': vif['address'], 'inst': self.instance.name},
instance=self.instance)
LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF '
'was not found on the instance.',
{'mac': vif['address']}, instance=self.instance)
return None
# Find and delete the trunk adapters
@ -822,8 +808,8 @@ class PvmOvsVifDriver(PvmLioVifDriver):
# Save this data for the migration command.
vea_vlan_mappings[vif['address']] = cna_w.pvid
LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with "
"PVID %(pvid)s",
LOG.info("VIF with mac %(mac)s is going on trunk %(dev)s with PVID "
"%(pvid)s",
{'mac': vif['address'], 'dev': dev, 'pvid': cna_w.pvid},
instance=self.instance)

View File

@ -1,4 +1,4 @@
# Copyright 2014, 2015, 2016 IBM Corp.
# Copyright 2014, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -46,6 +46,7 @@ from nova_powervm import conf as cfg
from nova_powervm.virt.powervm import exception as nvex
from nova_powervm.virt.powervm.i18n import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -124,7 +125,8 @@ def translate_event(pvm_state, pwr_state):
elif pvm_state in RESUMING_EVENTS and pwr_state != power_state.RUNNING:
trans = event.EVENT_LIFECYCLE_RESUMED
LOG.debug('Translated Event to %s', trans)
LOG.debug('Translated {PowerVM state %s; power state %s} to %s',
pvm_state, pwr_state, trans)
return trans
@ -677,29 +679,27 @@ def power_off(adapter, instance, opts=None, force_immediate=False,
entry = get_instance_wrapper(adapter, instance)
# Get the current state and see if we can stop the VM
LOG.debug("Powering off request for instance %(inst)s which is in "
"state %(state)s. Force Immediate Flag: %(force)s.",
{'inst': instance.name, 'state': entry.state,
'force': force_immediate})
LOG.debug("Power off requested for instance in state %(state)s. Force "
"Immediate Flag: %(force)s.",
{'state': entry.state, 'force': force_immediate},
instance=instance)
if entry.state in POWERVM_STOPABLE_STATE:
# Now stop the lpar
try:
LOG.debug("Power off executing for instance %(inst)s.",
{'inst': instance.name})
LOG.debug("Power off executing.", instance=instance)
kwargs = {'timeout': timeout} if timeout else {}
force_flag = (power.Force.TRUE if force_immediate
else power.Force.ON_FAILURE)
power.power_off(entry, None, force_immediate=force_flag,
add_parms=opts, **kwargs)
except Exception as e:
LOG.exception("Failed to power of instance.",
LOG.exception("Failed to power off instance.",
instance=instance)
raise exception.InstancePowerOffFailure(
reason=six.text_type(e))
return True
else:
LOG.debug("Power off not required for instance %(inst)s.",
{'inst': instance.name})
LOG.debug("Power off not required.", instance=instance)
return False
@ -779,7 +779,7 @@ def get_instance(context, pvm_uuid):
except exception.InstanceNotFound:
pass
except Exception as e:
LOG.debug('PowerVM UUID not found. %s', e)
LOG.debug('Instance with PowerVM UUID %s not found: %s', pvm_uuid, e)
return None

View File

@ -19,6 +19,7 @@ from pypowervm.tasks import hdisk
from pypowervm.tasks import partition
from pypowervm.wrappers import virtual_io_server as pvm_vios
# Defines the various volume connectors that can be used.
from nova import exception
from oslo_utils import importutils

View File

@ -1,4 +1,4 @@
# Copyright 2015 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -24,6 +24,7 @@ from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.virt.powervm import exception as exc
from nova_powervm.virt.powervm import vm
LOCAL_FEED_TASK = 'local_feed_task'

View File

@ -30,6 +30,7 @@ from pypowervm.tasks import partition
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.wrappers import storage as pvm_stg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -81,7 +82,7 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
information to the live_migration command, it
should be added to this dictionary.
"""
LOG.debug("Incoming mig_data=%s", mig_data)
LOG.debug("Incoming mig_data=%s", mig_data, instance=self.instance)
# Check if volume is available in destination.
vol_path = self._get_path()
if not os.path.exists(vol_path):
@ -102,15 +103,15 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
if vios_w.uuid not in self.vios_uuids:
return None
LOG.info("Adding logical volume disk connection between VM "
"%(vm)s and VIOS %(vios)s.",
{'vm': self.instance.name, 'vios': vios_w.name},
instance=self.instance)
LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
{'vios': vios_w.name}, instance=self.instance)
slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, path)
if slot_mgr.is_rebuild and not slot:
LOG.debug('Detected a device with path %s on VIOS %s on the '
'rebuild that did not exist on the source. '
'Ignoring.', path, vios_w.uuid)
LOG.debug('Detected a device with path %(path)s on VIOS '
'%(vios)s on the rebuild that did not exist on the '
'source. Ignoring.',
{'path': path, 'vios': vios_w.uuid},
instance=self.instance)
return None
mapping = tsk_map.build_vscsi_mapping(
@ -146,8 +147,7 @@ class FileIOVolumeAdapter(v_driver.PowerVMVolumeAdapter):
if vios_w.uuid not in self.vios_uuids:
return None
LOG.info("Disconnecting instance %(inst)s from storage disks.",
{'inst': self.instance.name}, instance=self.instance)
LOG.info("Disconnecting storage disks.", instance=self.instance)
removed_maps = tsk_map.remove_maps(vios_w, self.vm_uuid,
match_func=match_func)
for rm_map in removed_maps:

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -30,6 +30,7 @@ from taskflow import task
import six
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
DEVNAME_KEY = 'target_devname'
@ -113,7 +114,8 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
self._set_devname(device_name)
self._set_udid(udid)
LOG.debug('Device attached: %s', device_name)
LOG.debug('Device attached: %s', device_name,
instance=self.instance)
# Valid attachment
return True
@ -138,7 +140,8 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
otherwise.
"""
LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
dict(vol=self.volume_id, uuid=vios_w.uuid))
dict(vol=self.volume_id, uuid=vios_w.uuid),
instance=self.instance)
device_name = None
try:
device_name = self._get_devname()
@ -158,13 +161,12 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
instance=self.instance)
return False
except Exception as e:
LOG.warning(
"Disconnect Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s."
" Error: %(error)s",
{'error': e, 'vios_name': vios_w.name,
'volume_id': self.volume_id}, instance=self.instance)
except Exception:
LOG.exception(
"Disconnect Volume: Failed to find device on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s.",
{'vios_name': vios_w.name, 'volume_id': self.volume_id},
instance=self.instance)
return False
# We have found the device name
@ -203,11 +205,9 @@ class IscsiVolumeAdapter(volume.VscsiVolumeAdapter,
if not any([result['vio_modified']
for result in ret['wrapper_task_rets'].values()]):
LOG.warning(
"Disconnect Volume: Failed to disconnect the volume "
"%(volume_id)s on ANY of the Virtual I/O Servers for "
"instance %(inst)s.",
{'inst': self.instance.name, 'volume_id': self.volume_id},
instance=self.instance)
"Disconnect Volume: Failed to disconnect the volume "
"%(volume_id)s on ANY of the Virtual I/O Servers.",
{'volume_id': self.volume_id}, instance=self.instance)
except Exception as e:
LOG.exception('PowerVM error detaching volume from virtual '

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -31,6 +31,7 @@ from nova_powervm.virt.powervm import exception as exc
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.volume import driver as v_driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@ -299,11 +300,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
return False
# At this point, it should be correct.
LOG.info("Instance %(inst)s has not yet defined a WWPN on "
"fabric %(fabric)s. Appropriate WWPNs will be "
"generated.",
{'inst': self.instance.name, 'fabric': fabric},
instance=self.instance)
LOG.info("Instance has not yet defined a WWPN on fabric %(fabric)s. "
"Appropriate WWPNs will be generated.",
{'fabric': fabric}, instance=self.instance)
return True
def _hosts_wwpn(self, port_maps):
@ -399,10 +398,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
# were already configured.
for fabric in self._fabric_names():
fc_state = self._get_fabric_state(fabric)
LOG.info(
"NPIV wwpns fabric state=%(st)s for instance %(inst)s",
{'st': fc_state, 'inst': self.instance.name},
instance=self.instance)
LOG.info("NPIV wwpns fabric state=%(st)s.",
{'st': fc_state}, instance=self.instance)
if self._is_initial_wwpn(fc_state, fabric):
# Get a set of WWPNs that are globally unique from the system.
@ -468,12 +465,13 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
# set of VIOSs.
if all(pvm_vfcm.find_vios_for_wwpn(vios_wraps, pm[0])[0]
for pm in npiv_port_maps):
LOG.debug("Physical ports check out - just return maps.")
LOG.debug("All physical ports were found on the given Virtual I/O "
"Server(s).", instance=self.instance)
return npiv_port_maps
# If ANY of the VIOS ports were not there, rebuild the port maps
LOG.debug("Rebuild existing_npiv_port_maps=%s. Reset fabric state.",
npiv_port_maps)
npiv_port_maps, instance=self.instance)
v_wwpns = []
for port_map in npiv_port_maps:
v_wwpns.extend(port_map[1].split())
@ -482,7 +480,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
# Derive new maps and don't preserve existing maps
npiv_port_maps = pvm_vfcm.derive_npiv_map(
vios_wraps, self._fabric_ports(fabric), v_wwpns, preserve=False)
LOG.debug("Rebuilt port maps: %s", npiv_port_maps)
LOG.debug("Rebuilt port maps: %s", npiv_port_maps,
instance=self.instance)
self._set_fabric_meta(fabric, npiv_port_maps)
LOG.warning("Had to update the system metadata for the WWPNs due to "
"incorrect physical WWPNs on fabric %(fabric)s",
@ -583,10 +582,9 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
pvm_vfcm.remove_maps, self.vm_uuid,
port_map=npiv_port_map, logspec=ls)
else:
LOG.warning("No storage connections found between the "
"Virtual I/O Servers and FC Fabric "
"%(fabric)s.", {'fabric': fabric},
instance=self.instance)
LOG.warning("No storage connections found between the Virtual "
"I/O Servers and FC Fabric %(fabric)s.",
{'fabric': fabric}, instance=self.instance)
def _set_fabric_state(self, fabric, state):
"""Sets the fabric state into the instance's system metadata.
@ -599,9 +597,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
FS_INST_MAPPED: Fabric is mapped with the nova instance.
"""
meta_key = self._sys_fabric_state_key(fabric)
LOG.info("Setting Fabric state=%(st)s for instance=%(inst)s",
{'st': state, 'inst': self.instance.name},
instance=self.instance)
LOG.info("Setting Fabric state=%(st)s.",
{'st': state}, instance=self.instance)
self.instance.system_metadata[meta_key] = state
def _get_fabric_state(self, fabric):
@ -651,10 +648,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
meta_elems.append(p_wwpn)
meta_elems.extend(v_wwpn.split())
LOG.info("Fabric %(fabric)s wwpn metadata will be set to "
"%(meta)s for instance %(inst)s",
{'fabric': fabric, 'meta': ",".join(meta_elems),
'inst': self.instance.name},
LOG.info("Fabric %(fabric)s wwpn metadata will be set to %(meta)s.",
{'fabric': fabric, 'meta': ",".join(meta_elems)},
instance=self.instance)
# Clear out the original metadata. We may be reducing the number of
@ -696,12 +691,10 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
if self.instance.system_metadata.get(meta_key) is None:
# If no mappings exist, log a warning.
LOG.warning("No NPIV mappings exist for instance %(inst)s on "
"fabric %(fabric)s. May not have connected to "
"the fabric yet or fabric configuration was "
"recently modified.",
{'inst': self.instance.name, 'fabric': fabric},
instance=self.instance)
LOG.warning("No NPIV mappings exist for instance on fabric "
"%(fabric)s. May not have connected to the fabric "
"yet or fabric configuration was recently modified.",
{'fabric': fabric}, instance=self.instance)
return []
wwpns = self.instance.system_metadata[meta_key]

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -28,6 +28,8 @@ from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.utils import transaction as tx
from pypowervm.wrappers import storage as pvm_stor
from pypowervm.wrappers import virtual_io_server as pvm_vios
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
UDID_KEY = 'target_UDID'
@ -142,9 +144,8 @@ class VscsiVolumeAdapter(object):
for a particular bus, or none of them.
"""
def add_func(vios_w):
LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s "
"to VM %(vm)s", {'dev': device_name,
'vm': self.vm_uuid})
LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s",
{'dev': device_name}, instance=self.instance)
pv = pvm_stor.PV.bld(self.adapter, device_name, udid)
v_map = tsk_map.build_vscsi_mapping(
self.host_uuid, vios_w, self.vm_uuid, pv,
@ -163,7 +164,7 @@ class VscsiVolumeAdapter(object):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info('Failed to retrieve device_id key from BDM for volume id '
'%s', self.volume_id)
'%s', self.volume_id, instance=self.instance)
return None
def _set_udid(self, udid):
@ -184,7 +185,7 @@ class VscsiVolumeAdapter(object):
# It's common to lose our specific data in the BDM. The connection
# information can be 'refreshed' by operations like LPM and resize
LOG.info('Failed to retrieve device_id key from BDM for volume id '
'%s', self.volume_id)
'%s', self.volume_id, instance=self.instance)
return None
def _set_devname(self, devname):
@ -205,8 +206,8 @@ class VscsiVolumeAdapter(object):
used when a volume is detached from the VM.
"""
def rm_func(vios_w):
LOG.info("Removing vSCSI mapping from Physical Volume %(dev)s "
"to VM %(vm)s", {'dev': device_name, 'vm': vm_uuid})
LOG.info("Removing vSCSI mapping from physical volume %(dev)s.",
{'dev': device_name}, instance=self.instance)
removed_maps = tsk_map.remove_maps(
vios_w, vm_uuid,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
@ -229,17 +230,19 @@ class VscsiVolumeAdapter(object):
:param stg_ftsk: The feed task to add to. If None, then self.stg_ftsk
"""
def rm_hdisk():
LOG.info("Running remove for hdisk: '%s'", device_name)
LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
"%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
instance=self.instance)
try:
# Attempt to remove the hDisk
hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
vio_wrap.uuid)
except Exception as e:
except Exception:
# If there is a failure, log it, but don't stop the process
LOG.warning("There was an error removing the hdisk "
"%(disk)s from the Virtual I/O Server.",
{'disk': device_name})
LOG.warning(e)
LOG.exception("There was an error removing the hdisk "
"%(disk)s from Virtual I/O Server %(vios)s.",
{'disk': device_name, 'vios': vio_wrap.name},
instance=self.instance)
# Check if there are not multiple mapping for the device
if not self._check_host_mappings(vio_wrap, device_name):
@ -247,8 +250,10 @@ class VscsiVolumeAdapter(object):
stg_ftsk = stg_ftsk or self.stg_ftsk
stg_ftsk.add_post_execute(task.FunctorTask(rm_hdisk, name=name))
else:
LOG.info("hdisk %(disk)s is not removed because it has "
"existing storage mappings", {'disk': device_name})
LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
"%(vios)s because it has existing storage mappings",
{'disk': device_name, 'vios': vio_wrap.name},
instance=self.instance)
def _check_host_mappings(self, vios_wrap, device_name):
"""Checks if the given hdisk has multiple mappings
@ -265,8 +270,9 @@ class VscsiVolumeAdapter(object):
vios_scsi_mappings, None,
tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
LOG.info("%(num)d Storage Mappings found for %(dev)s",
{'num': len(mappings), 'dev': device_name})
LOG.info("%(num)d storage mappings found for %(dev)s on VIOS %(vios)s",
{'num': len(mappings), 'dev': device_name,
'vios': vios_wrap.name}, instance=self.instance)
# the mapping is still present as the task feed removes
# the mapping later
return len(mappings) > 1
@ -275,11 +281,11 @@ class VscsiVolumeAdapter(object):
"""Cleanup the hdisk associated with this udid."""
if not udid and not devname:
LOG.warning(
'Could not remove hdisk for volume: %s', self.volume_id)
LOG.warning('Could not remove hdisk for volume %s', self.volume_id,
instance=self.instance)
return
LOG.info('Removing hdisk for udid: %s', udid)
LOG.info('Removing hdisk for udid: %s', udid, instance=self.instance)
def find_hdisk_to_remove(vios_w):
if devname is None:
@ -288,8 +294,9 @@ class VscsiVolumeAdapter(object):
device_name = devname
if device_name is None:
return
LOG.info('Removing %(hdisk)s from VIOS %(vios)s',
{'hdisk': device_name, 'vios': vios_w.name})
LOG.info('Adding deferred task to remove %(hdisk)s from VIOS '
'%(vios)s.', {'hdisk': device_name, 'vios': vios_w.name},
instance=self.instance)
self._add_remove_hdisk(vios_w, device_name,
stg_ftsk=rmv_hdisk_ftsk)

View File

@ -1,4 +1,4 @@
# Copyright 2015, 2016 IBM Corp.
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
@ -30,6 +30,8 @@ from pypowervm.utils import transaction as tx
from pypowervm.wrappers import virtual_io_server as pvm_vios
import six
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -178,15 +180,16 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
if len(itls) == 0:
LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
{'vios': vios_w.name, 'volume_id': volume_id})
{'vios': vios_w.name, 'volume_id': volume_id},
instance=self.instance)
return None, None, None
status, device_name, udid = hdisk.discover_hdisk(self.adapter,
vios_w.uuid, itls)
if hdisk.good_discovery(status, device_name):
LOG.info('Discovered %(hdisk)s on vios %(vios)s for '
'volume %(volume_id)s. Status code: %(status)s.',
LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
'%(volume_id)s. Status code: %(status)s.',
{'hdisk': device_name, 'vios': vios_w.name,
'volume_id': volume_id, 'status': str(status)},
instance=self.instance)
@ -217,9 +220,10 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
slot, lua = slot_mgr.build_map.get_vscsi_slot(vios_w, udid)
if slot_mgr.is_rebuild and not slot:
LOG.debug('Detected a device with UDID %s on VIOS %s on the '
'rebuild that did not exist on the source. '
'Ignoring.', udid, vios_w.uuid)
LOG.debug('Detected a device with UDID %(udid)s on VIOS '
'%(vios)s on the rebuild that did not exist on the '
'source. Ignoring.', {'udid': udid, 'vios': vios_w.uuid},
instance=self.instance)
return False
if hdisk.good_discovery(status, device_name):
@ -232,7 +236,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
# Save the UDID for the disk in the connection info. It is
# used for the detach.
self._set_udid(udid)
LOG.debug('Device attached: %s', device_name)
LOG.debug('Added deferred task to attach device %s', device_name,
instance=self.instance)
# Valid attachment
return True
@ -253,7 +258,8 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
otherwise.
"""
LOG.debug("Disconnect volume %(vol)s from vios uuid %(uuid)s",
dict(vol=self.volume_id, uuid=vios_w.uuid))
dict(vol=self.volume_id, uuid=vios_w.uuid),
instance=self.instance)
udid, device_name = None, None
try:
udid = self._get_udid()
@ -281,12 +287,12 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
instance=self.instance)
return False
except Exception as e:
LOG.warning(
except Exception:
LOG.exception(
"Disconnect Volume: Failed to find disk on Virtual I/O "
"Server %(vios_name)s for volume %(volume_id)s. Volume "
"UDID: %(volume_uid)s. Error: %(error)s",
{'error': e, 'volume_uid': udid, 'vios_name': vios_w.name,
"UDID: %(volume_uid)s.",
{'volume_uid': udid, 'vios_name': vios_w.name,
'volume_id': self.volume_id}, instance=self.instance)
return False
@ -326,9 +332,7 @@ class PVVscsiFCVolumeAdapter(volume.VscsiVolumeAdapter,
for result in ret['wrapper_task_rets'].values()]):
LOG.warning("Disconnect Volume: Failed to disconnect the "
"volume %(volume_id)s on ANY of the Virtual "
"I/O Servers for instance %(inst)s.",
{'inst': self.instance.name,
'volume_id': self.volume_id},
"I/O Servers.", {'volume_id': self.volume_id},
instance=self.instance)
except Exception as e: