Fixing UT failure.

Change-Id: Ia733c160da57ab07149c48a3b886505c5c557d7c
Author: amansi26  2019-06-07 05:27:51 -04:00
parent 61b7bdd343
commit 98630ef89e
23 changed files with 103 additions and 87 deletions


@@ -1,3 +1,4 @@
-sphinx!=1.6.6,!=1.6.7,>=1.6.5 # BSD
+sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7,>=1.6.2,!=2.1.0;python_version>='3.4' # BSD
 openstackdocstheme>=1.19.0 # Apache-2.0
 sphinx-feature-classification>=0.2.0 # Apache-2.0
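
The split Sphinx requirement above relies on PEP 508 environment markers: pip
evaluates the expression after the ';' against the running interpreter and
keeps only the matching line. A minimal sketch of that evaluation, assuming
the 'packaging' library is available:

    from packaging.markers import Marker

    # True under Python 3.4+, False under Python 2.7.
    py3_marker = Marker("python_version >= '3.4'")
    print(py3_marker.evaluate())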


@@ -69,6 +69,7 @@ numpy==1.14.2
 openstacksdk==0.12.0
 os-brick==2.5.0
 os-client-config==1.29.0
+os-resource-classes==0.1.0 # Apache-2.0
 os-service-types==1.2.0
 os-traits==0.4.0
 os-vif==1.7.0
@@ -112,6 +113,7 @@ py==1.5.2
 pyasn1-modules==0.2.1
 pyasn1==0.4.2
 pycadf==2.7.0
+pycodestyle==2.0.0
 pycparser==2.18
 pyflakes==0.8.1
 pyinotify==0.9.6
@@ -146,6 +148,7 @@ Routes==2.3.1
 simplejson==3.13.2
 six==1.10.0
 smmap2==2.0.3
+Sphinx==1.6.2
 sqlalchemy-migrate==0.11.0
 SQLAlchemy==1.0.10
 sqlparse==0.2.4


@@ -200,6 +200,7 @@ vnc_opts = [
 STATIC_OPTIONS = (powervm_opts + ssp_opts + vol_adapter_opts + npiv_opts
                   + remote_restart_opts + swift_opts + vnc_opts)

 # Dictionary where the key is the NPIV Fabric Name, and the value is a list of
 # Physical WWPNs that match the key.
 NPIV_FABRIC_WWPNS = {}


@@ -81,14 +81,13 @@ EMPTY_IMAGE = image_meta.ImageMeta.from_dict({})
 # environment then eventlet monkeypatches socket.getaddrinfo() with an
 # implementation which doesn't work for IPv6. What we're checking here is
 # that the magic environment variable was set when the import happened.
-if ('eventlet' in sys.modules and
-        os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
-    raise ImportError('eventlet imported before nova/cmd/__init__ '
-                      '(env var set to %s)'
-                      % os.environ.get('EVENTLET_NO_GREENDNS'))
+if ('eventlet' in sys.modules):
+    if (os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
+        raise ImportError('eventlet imported before nova/cmd/__init__ '
+                          '(env var set to %s)'
+                          % os.environ.get('EVENTLET_NO_GREENDNS'))
 os.environ['EVENTLET_NO_GREENDNS'] = 'yes'

 import eventlet
 eventlet.monkey_patch(os=False)
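
The guard above works only because the variable is exported before the first
'import eventlet' anywhere in the process: eventlet decides whether to use
greendns at import time. A minimal standalone sketch of the required ordering:

    import os

    # Must run before eventlet is ever imported; greendns selection happens
    # as a side effect of the import itself.
    os.environ['EVENTLET_NO_GREENDNS'] = 'yes'

    import eventlet
    eventlet.monkey_patch(os=False)  # patch the stdlib, but leave os alone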


@@ -42,15 +42,15 @@ class TestLocalDisk(test.NoDBTestCase):
         self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt

-        # Set up mock for internal VIOS.get()s
-        self.mock_vios_get = self.useFixture(fixtures.MockPatch(
-            'pypowervm.wrappers.virtual_io_server.VIOS',
-            autospec=True)).mock.get
         # The mock VIOS needs to have scsi_mappings as a list. Internals are
         # set by individual test cases as needed.
         smaps = [mock.Mock()]
         self.vio_to_vg = mock.Mock(spec=pvm_vios.VIOS, scsi_mappings=smaps,
                                    uuid='vios-uuid')
+        # Set up mock for internal VIOS.get()s
+        self.mock_vios_get = self.useFixture(fixtures.MockPatch(
+            'pypowervm.wrappers.virtual_io_server.VIOS',
+            autospec=True)).mock.get
         # For our tests, we want find_maps to return the mocked list of scsi
         # mappings in our mocked VIOS.
         self.mock_find_maps = self.useFixture(fixtures.MockPatch(
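
The reorder above matters because useFixture() applies the patch immediately,
and the fixture is unwound automatically when the test ends. A self-contained
sketch of the fixtures.MockPatch pattern used here (the patched target
os.getcwd is only an example):

    import os

    import fixtures
    import testtools


    class ExampleTest(testtools.TestCase):
        def test_patched(self):
            # The patch is live from this call until test teardown.
            mocked = self.useFixture(fixtures.MockPatch(
                'os.getcwd', return_value='/tmp')).mock
            self.assertEqual('/tmp', os.getcwd())
            self.assertTrue(mocked.called)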


@@ -2206,12 +2206,13 @@ class TestPowerVMDriver(test.NoDBTestCase):
     def test_get_host_cpu_stats(self):
         hcpu_stats = self.drv.get_host_cpu_stats()
+        total_cycles = self.drv.host_cpu_cache.total_cycles
+        total_user_cycles = self.drv.host_cpu_cache.total_user_cycles
+        total_fw_cycles = self.drv.host_cpu_cache.total_fw_cycles
         expected_stats = {
             'kernel': self.drv.host_cpu_cache.total_fw_cycles,
             'user': self.drv.host_cpu_cache.total_user_cycles,
-            'idle': (self.drv.host_cpu_cache.total_cycles -
-                     self.drv.host_cpu_cache.total_user_cycles -
-                     self.drv.host_cpu_cache.total_fw_cycles),
+            'idle': (total_cycles - total_user_cycles - total_fw_cycles),
             'iowait': 0,
             'frequency': self.drv.host_cpu_cache.cpu_freq}
         self.assertEqual(expected_stats, hcpu_stats)


@@ -81,7 +81,7 @@ class TestLPM(test.NoDBTestCase):
         self.host_mig_data['active_migrations_in_progress'] = 2

         with mock.patch.object(
                 self.lpmsrc, '_check_migration_ready', return_value=None):

             # Test the bad path first, then patch in values to make succeed
             mock_wrap = mock.Mock(id=123)


@@ -111,8 +111,9 @@ class TestFileIOVolumeAdapter(test_vol.TestVolumeAdapter):
     @mock.patch('pypowervm.tasks.partition.get_mgmt_partition')
     @mock.patch('pypowervm.wrappers.storage.FileIO.bld')
     def test_connect_volume_rebuild_no_slot(
-        self, mock_file_bld, mock_get_mgmt_partition, mock_get_vm_id,
-        mock_udid_to_map, mock_reg_map, mock_get_vios_uuid, mock_build_map):
+            self, mock_file_bld, mock_get_mgmt_partition, mock_get_vm_id,
+            mock_udid_to_map, mock_reg_map, mock_get_vios_uuid,
+            mock_build_map):
         # Mockups
         mock_file = mock.Mock()
         mock_file_bld.return_value = mock_file


@@ -113,8 +113,9 @@ class TestRBDVolumeAdapter(test_vol.TestVolumeAdapter):
     @mock.patch('pypowervm.tasks.partition.get_mgmt_partition', autospec=True)
     @mock.patch('pypowervm.wrappers.storage.RBD.bld_ref')
     def test_connect_volume_rebuild_no_slot(
-        self, mock_rbd_bld_ref, mock_get_mgmt_partition, mock_get_vm_id,
-        mock_udid_to_map, mock_reg_map, mock_get_vios_uuid, mock_build_map):
+            self, mock_rbd_bld_ref, mock_get_mgmt_partition, mock_get_vm_id,
+            mock_udid_to_map, mock_reg_map, mock_get_vios_uuid,
+            mock_build_map):
         # Mockups
         mock_rbd = mock.Mock()
         mock_rbd_bld_ref.return_value = mock_rbd


@@ -234,8 +234,8 @@ class TestVSCSIAdapter(BaseVSCSITest):
                 '._validate_vios_on_connection')
     @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
     def test_connect_volume_no_update(
             self, mock_get_vm_id, mock_validate_vioses, mock_disc_hdisk,
             mock_build_map, mock_add_map):
         """Make sure we don't do an actual update of the VIOS if not needed."""
         # The mock return values
         mock_build_map.return_value = 'fake_map'
@@ -259,8 +259,8 @@ class TestVSCSIAdapter(BaseVSCSITest):
                 '._validate_vios_on_connection')
     @mock.patch('nova_powervm.virt.powervm.vm.get_vm_id')
     def test_connect_volume_to_initiators(
-        self, mock_get_vm_id, mock_validate_vioses, mock_add_vscsi_mapping,
-        mock_build_itls):
+            self, mock_get_vm_id, mock_validate_vioses,
+            mock_add_vscsi_mapping, mock_build_itls):
         """Tests that the connect w/out initiators throws errors."""
         mock_get_vm_id.return_value = 'partition_id'
@@ -393,8 +393,8 @@ class TestVSCSIAdapter(BaseVSCSITest):
         mock_remove_maps.side_effect = validate_remove_maps

         with mock.patch.object(
                 self.vol_drv, '_discover_volume_on_vios',
                 return_value=('status', 'dev_name', 'udidit')):
             # Run the method
             self.vol_drv.disconnect_volume(self.slot_mgr)
@@ -413,8 +413,8 @@ class TestVSCSIAdapter(BaseVSCSITest):
         """Ensures that if the UDID can not be found, no disconnect."""
         mock_good_discover.return_value = False

         with mock.patch.object(
                 self.vol_drv, '_discover_volume_on_vios',
                 return_value=('status', 'dev_name', None)):
             # Run the method
             self.vol_drv.disconnect_volume(self.slot_mgr)


@@ -344,7 +344,7 @@ class DiskAdapter(object):
                     context, instance, image_meta, image_type=image_type)
             except Exception:
                 with excutils.save_and_reraise_exception(
                         logger=LOG, reraise=False) as sare:
                     if attempt < 4:
                         LOG.exception("Disk Upload attempt #%d failed. "
                                       "Retrying the upload.", attempt,


@@ -23,10 +23,10 @@ from nova import context as ctx
 from nova import exception
 from nova import image
 from nova import objects
-from nova import rc_fields
 from nova import utils as n_utils
 from nova.virt import configdrive
 from nova.virt import driver
+import os_resource_classes as orc
 from oslo_log import log as logging
 from oslo_utils import importutils
 import six
@@ -299,12 +299,13 @@ class PowerVMDriver(driver.ComputeDriver):
     def get_host_cpu_stats(self):
         """Return the current CPU state of the host."""
         self.host_cpu_cache.refresh()
+        total_cycles = self.host_cpu_cache.total_cycles
+        total_user_cycles = self.host_cpu_cache.total_user_cycles
+        total_fw_cycles = self.host_cpu_cache.total_fw_cycles
         return {
             'kernel': self.host_cpu_cache.total_fw_cycles,
             'user': self.host_cpu_cache.total_user_cycles,
-            'idle': (self.host_cpu_cache.total_cycles -
-                     self.host_cpu_cache.total_user_cycles -
-                     self.host_cpu_cache.total_fw_cycles),
+            'idle': (total_cycles - total_user_cycles - total_fw_cycles),
             # Not reported by PowerVM
             'iowait': 0,
             'frequency': self.host_cpu_cache.cpu_freq}
@@ -384,8 +385,9 @@ class PowerVMDriver(driver.ComputeDriver):
         flow_spawn = tf_lf.Flow("spawn")

         # Determine if this is a VM recreate
-        recreate = (instance.task_state == task_states.REBUILD_SPAWNING and
-                    'id' not in image_meta)
+        task_state = instance.task_state
+        rebuild_spawning = task_states.REBUILD_SPAWNING
+        recreate = (task_state == rebuild_spawning and 'id' not in image_meta)

         # Create the transaction manager (FeedTask) for Storage I/O.
         xag = self._get_inst_xag(instance, bdms, recreate=recreate)
@@ -401,9 +403,11 @@ class PowerVMDriver(driver.ComputeDriver):
             vol_drv_iter=vol_drv_iter)

         # Create the LPAR, check if NVRAM restore is needed.
-        nvram_mgr = (self.nvram_mgr if self.nvram_mgr and
-                     (recreate or instance.vm_state in FETCH_NVRAM_STATES)
-                     else None)
+        vm_state = instance.vm_state
+        if self.nvram_mgr and (recreate or vm_state in FETCH_NVRAM_STATES):
+            nvram_mgr = self.nvram_mgr
+        else:
+            nvram_mgr = None

         # If we're recreating pass None in for the FeedTask. This will make the
         # Create task produce a FeedTask that will be used to scrub stale
@@ -550,7 +554,6 @@ class PowerVMDriver(driver.ComputeDriver):
     def _destroy(self, context, instance, block_device_info=None,
                  network_info=None, destroy_disks=True, shutdown=True):
         """Internal destroy method used by multiple operations.
-
         :param context: security context
@@ -652,14 +655,13 @@ class PowerVMDriver(driver.ComputeDriver):
         # the prior step.
         flow.add(tf_vm.Delete(self.adapter, instance))

-        if (destroy_disks and
-                instance.vm_state not in KEEP_NVRAM_STATES and
-                instance.host in [None, CONF.host]):
-            # If the disks are being destroyed and not one of the
-            # operations that we should keep the NVRAM around for, then
-            # it's probably safe to delete the NVRAM from the store.
-            flow.add(tf_vm.DeleteNvram(self.nvram_mgr, instance))
-            flow.add(tf_slot.DeleteSlotStore(instance, slot_mgr))
+        if (destroy_disks and instance.vm_state not in KEEP_NVRAM_STATES):
+            if instance.host in [None, CONF.host]:
+                # If the disks are being destroyed and not one of the
+                # operations that we should keep the NVRAM around for, then
+                # it's probably safe to delete the NVRAM from the store.
+                flow.add(tf_vm.DeleteNvram(self.nvram_mgr, instance))
+                flow.add(tf_slot.DeleteSlotStore(instance, slot_mgr))

         # Run the flow
         tf_base.run(flow, instance=instance)
@@ -672,11 +674,11 @@ class PowerVMDriver(driver.ComputeDriver):
                           instance=instance)
             return
         except Exception as e:
             LOG.exception("PowerVM error destroying instance.",
                           instance=instance)
             # Convert to a Nova exception
             raise exception.InstanceTerminationFailure(
                 reason=six.text_type(e))

     def destroy(self, context, instance, network_info, block_device_info=None,
                 destroy_disks=True, migrate_data=None):
@@ -1020,19 +1022,19 @@ class PowerVMDriver(driver.ComputeDriver):
         disk_reserved = self._get_reserved_host_disk_gb_from_config()

         inventory = {
-            rc_fields.ResourceClass.VCPU: {
+            orc.VCPU: {
                 'total': data['vcpus'],
                 'max_unit': data['vcpus'],
                 'allocation_ratio': cpu_alloc_ratio,
                 'reserved': cpu_reserved,
             },
-            rc_fields.ResourceClass.MEMORY_MB: {
+            orc.MEMORY_MB: {
                 'total': data['memory_mb'],
                 'max_unit': data['memory_mb'],
                 'allocation_ratio': mem_alloc_ratio,
                 'reserved': mem_reserved,
             },
-            rc_fields.ResourceClass.DISK_GB: {
+            orc.DISK_GB: {
                 # TODO(efried): Proper DISK_GB sharing when SSP driver in play
                 'total': int(data['local_gb']),
                 'max_unit': int(data['local_gb']),
@@ -1205,7 +1207,7 @@ class PowerVMDriver(driver.ComputeDriver):
             # If VM is moving to a new host make sure the NVRAM is at the very
             # latest.
             flow.add(tf_vm.StoreNvram(self.nvram_mgr, instance,
                                       immediate=True))
         if flavor.root_gb > instance.root_gb:
             # Resize the root disk
             flow.add(tf_stg.ExtendDisk(
@@ -1292,8 +1294,8 @@ class PowerVMDriver(driver.ComputeDriver):
         self._log_operation('finish migration', instance)

         # Ensure the disk drivers are compatible.
-        if (not same_host and
-                not self._is_booted_from_volume(block_device_info)):
+        booted_from_vol = self._is_booted_from_volume(block_device_info)
+        if (not same_host and not booted_from_vol):
             # Can't migrate the disks if they are not on shared storage
             if not self.disk_dvr.capabilities['shared_storage']:
                 raise exception.InstanceFaultRollback(
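
The nova.rc_fields shim was removed from nova; the standard resource class
names now come from the os-resource-classes library, where each class is a
module-level string constant. A minimal sketch:

    import os_resource_classes as orc

    # The standard classes are plain strings on the module.
    assert orc.VCPU == 'VCPU'
    assert orc.MEMORY_MB == 'MEMORY_MB'
    assert orc.DISK_GB == 'DISK_GB'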


@@ -64,9 +64,8 @@ def build_host_resource_from_ms(ms_wrapper):
     data['vcpus_used'] = int(math.ceil(pu_used))

     data['memory_mb'] = ms_wrapper.memory_configurable
-    data['memory_mb_used'] = (ms_wrapper.memory_configurable -
-                              ms_wrapper.memory_free)
+    used_memory = ms_wrapper.memory_configurable - ms_wrapper.memory_free
+    data['memory_mb_used'] = used_memory
     data["hypervisor_type"] = fields.HVType.PHYP
     data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION
     data["hypervisor_hostname"] = CONF.host


@@ -54,8 +54,9 @@ class LiveMigrationVolume(exception.NovaException):
 def _verify_migration_capacity(host_w, instance):
     """Check that the counts are valid for in progress and supported."""
     mig_stats = host_w.migration_data
-    if (mig_stats['active_migrations_in_progress'] >=
-            mig_stats['active_migrations_supported']):
+    active_migrations_in_progress = mig_stats['active_migrations_in_progress']
+    active_migrations_supported = mig_stats['active_migrations_supported']
+    if (active_migrations_in_progress >= active_migrations_supported):
         msg = (_("Cannot migrate %(name)s because the host %(host)s only "
                  "allows %(allowed)s concurrent migrations and "
@@ -105,8 +106,9 @@ class LiveMigrationDest(LiveMigration):
         src_stats = src_compute_info['stats']
         dst_stats = dst_compute_info['stats']
         # Check the lmb sizes for compatibility
-        if (src_stats['memory_region_size'] !=
-                dst_stats['memory_region_size']):
+        src_memory_region_size = src_stats['memory_region_size']
+        dst_memory_region_size = dst_stats['memory_region_size']
+        if (src_memory_region_size != dst_memory_region_size):
             msg = (_("Cannot migrate instance '%(name)s' because the "
                      "memory region size of the source (%(source_mrs)d MB) "
                      "does not match the memory region size of the target "


@@ -76,7 +76,7 @@ class ConfigDrivePowerVM(object):
         """
         network_info = copy.deepcopy(network_info)
         for vif in network_info:
-            if vif.get('type') is not 'ovs':
+            if vif.get('type') != 'ovs':
                 vif['type'] = 'vif'
         return network_info
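
The old comparison used 'is not', which tests object identity rather than
equality; whether two equal strings share one object is an interning detail of
the interpreter, so the check could pass or fail unpredictably. A small
illustration with arbitrary values:

    a = 'ovs'
    b = ''.join(['o', 'v', 's'])  # equal value, usually a distinct object

    print(a == b)   # True: same value
    print(a is b)   # typically False: different objects -- the bug above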


@@ -26,7 +26,7 @@ import glob
 import os

 from nova import exception
-from nova.privsep import path as priv_path
+import nova.privsep.path
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
 from pypowervm.tasks import partition as pvm_par
@@ -86,7 +86,7 @@ def discover_vscsi_disk(mapping, scan_timeout=300):
     for scanpath in glob.glob(
             '/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
         # Writing '- - -' to this sysfs file triggers bus rescan
-        priv_path.writefile(scanpath, 'a', '- - -')
+        nova.privsep.path.writefile(scanpath, 'a', '- - -')

     # Now see if our device showed up. If so, we can reliably match it based
     # on its Linux ID, which ends with the disk's UDID.
@@ -151,7 +151,7 @@ def remove_block_dev(devpath, scan_timeout=10):
               "partition via special file %(delpath)s.",
               {'devpath': devpath, 'delpath': delpath})
     # Writing '1' to this sysfs file deletes the block device and rescans.
-    priv_path.writefile(delpath, 'a', '1')
+    nova.privsep.path.writefile(delpath, 'a', '1')

     # The bus scan is asynchronous. Need to poll, waiting for the device to
     # disappear. Stop when stat raises OSError (dev file not found) - which is
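
Both call sites write short control strings into sysfs files that the kernel
interprets as commands; the privsep daemon performs the write with elevated
privileges. Stripped of the privilege separation, writefile amounts to roughly
this (a sketch, not nova's exact implementation):

    def writefile(path, mode, content):
        # Open the (typically root-only) sysfs file and write the control
        # string, e.g. '- - -' to rescan a SCSI bus or '1' to delete a device.
        with open(path, mode) as f:
            f.write(content)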


@@ -793,10 +793,10 @@ class PvmOvsVifDriver(PvmVifDriver):
         for adpt in child_adpts:
             # We need a trunk adapter (so check trunk_pri). Then the trunk
             # is unique by PVID and PowerVM vSwitch ID.
-            if (adpt.pvid == vlan and adpt.vswitch_id == vswitch_id and
-                    adpt.trunk_pri):
-                trunk = adpt
-                break
+            if (adpt.pvid == vlan and adpt.vswitch_id == vswitch_id):
+                if adpt.trunk_pri:
+                    trunk = adpt
+                    break

         if trunk:
             # Delete the peer'd trunk adapter.


@@ -121,9 +121,9 @@ def translate_event(pvm_state, pwr_state):
         trans = event.EVENT_LIFECYCLE_STARTED
     elif pvm_state in STOPPED_EVENTS and pwr_state != power_state.SHUTDOWN:
         trans = event.EVENT_LIFECYCLE_STOPPED
-    elif (pvm_state in SUSPENDED_EVENTS and
-          pwr_state != power_state.SUSPENDED):
-        trans = event.EVENT_LIFECYCLE_SUSPENDED
+    elif pvm_state in SUSPENDED_EVENTS:
+        if pwr_state != power_state.SUSPENDED:
+            trans = event.EVENT_LIFECYCLE_SUSPENDED
     elif pvm_state in RESUMING_EVENTS and pwr_state != power_state.RUNNING:
         trans = event.EVENT_LIFECYCLE_RESUMED
@@ -377,8 +377,8 @@ class VMBuilder(object):
         :param pool_name: The shared proc pool name.
         :return: The internal API id for the shared proc pool.
         """
-        if (pool_name is None or
-                pool_name == pvm_spp.DEFAULT_POOL_DISPLAY_NAME):
+        default_pool_name = pvm_spp.DEFAULT_POOL_DISPLAY_NAME
+        if (pool_name is None or pool_name == default_pool_name):
             # The default pool is 0
             return 0


@@ -66,8 +66,7 @@ def get_iscsi_initiators(adapter, vios_ids=None):
         # Get the VIOS id lock for initiator lookup
         @lockutils.synchronized('inititator-lookup-' + vios_id)
         def _discover_initiator():
-            if (vios_id in _ISCSI_INITIATORS and
-                    _ISCSI_INITIATORS[vios_id]):
+            if vios_id in _ISCSI_INITIATORS and _ISCSI_INITIATORS[vios_id]:
                 return
             else:
                 try:
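
The decorator serializes concurrent lookups for the same VIOS id, and the
early return keeps later callers from repeating discovery once the cache is
populated. A minimal sketch of the same pattern (the cache dict and discovery
value are illustrative):

    from oslo_concurrency import lockutils

    _CACHE = {}

    def lookup(key):
        @lockutils.synchronized('lookup-' + key)
        def _discover():
            if key in _CACHE and _CACHE[key]:
                return  # another caller already discovered it
            _CACHE[key] = 'discovered-value'

        _discover()
        return _CACHE[key]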


@@ -95,10 +95,11 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         # of the spawn. We also want to check that the instance is on this
         # host. If it isn't then we can remove the mappings because this is
         # being called as the result of an evacuation clean up.
-        if (self.instance.task_state not in TASK_STATES_FOR_DISCONNECT and
-                self.instance.host in [None, CONF.host]):
-            # NPIV should only remove the VFC mapping upon a destroy of the VM
-            return
+        if (self.instance.task_state not in TASK_STATES_FOR_DISCONNECT):
+            if (self.instance.host in [None, CONF.host]):
+                # NPIV should only remove the VFC mapping upon a destroy of
+                # the VM
+                return

         # Run the disconnect for each fabric
         for fabric in self._fabric_names():
@@ -336,8 +337,7 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter):
         :return: True if the instance appears to be migrating to this host.
                  False otherwise.
         """
-        return (fc_state == FS_INST_MAPPED and
-                self.instance.host != CONF.host)
+        return fc_state == FS_INST_MAPPED and self.instance.host != CONF.host

     def _configure_wwpns_for_migration(self, fabric):
         """Configures the WWPNs for a migration.


@@ -8,7 +8,10 @@ oslo.config>=5.2.0 # Apache-2.0
 oslo.log>=3.36.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
 oslo.utils>=3.37.0 # Apache-2.0
+os-resource-classes>=0.1.0 # Apache-2.0
 pypowervm>=1.1.20 # Apache-2.0
+sphinx!=1.6.6,!=1.6.7,<2.0.0,>=1.6.2;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
 python-swiftclient>=3.2.0 # Apache-2.0
 taskflow>=2.16.0 # Apache-2.0
 setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=21.0.0 # PSF/ZPL


@@ -1,12 +1,15 @@
 # The order of packages is significant, because pip processes them in the order
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
-hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
+hacking>=1.1.0,<1.2.0 # Apache-2.0
 bashate>=0.5.1 # Apache-2.0
 coverage!=4.4,>=4.0 # Apache-2.0
 fixtures>=3.0.0 # Apache-2.0/BSD
 oslotest>=3.2.0 # Apache-2.0
+sphinx!=1.6.6,!=1.6.7,<2.0.0,>=1.6.2;python_version=='2.7' # BSD
+sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
 stestr>=1.0.0 # Apache-2.0
 testscenarios>=0.4 # Apache-2.0/BSD
 testtools>=2.2.0 # MIT
 mock>=2.0.0 # BSD
+pycodestyle>=2.0.0 # MIT License


@@ -89,7 +89,8 @@ whitelist_externals = bash
 [flake8]
 # N342 - Config Opts need to be outside nova/conf until powervm is part of nova proper
-ignore = E125,N342
+# E402 module level import not at top of file
+ignore = E125,N342,W504,W503,E402
 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools

 [hacking]
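
W503 and W504 are mutually contradictory pycodestyle checks (line break before
vs. after a binary operator), so projects commonly ignore both; E402 has to be
ignored because the eventlet guard above requires code to run before the
module-level imports. A quick illustration of the W503/W504 pair:

    first_value, second_value = 1, 2

    # W504 flags the break after the operator...
    total = (first_value +
             second_value)

    # ...while W503 flags the break before it. Ignoring both allows either
    # wrapping style.
    total = (first_value
             + second_value)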