Switch nova.virt.libvirt.* to instance dot notation

Fix modules under nova/virt/libvirt/* to use instance objects
with the field access dot notation everywhere. Essentially we use
instance.key instead of instance['key']. Needed to rework some of
the test cases to get them working.

Change-Id: I942dc6c91681e5791bc2f44bbe6df841bb48d38f
This commit is contained in:
Davanum Srinivas 2015-02-25 21:34:01 -05:00
parent 8fe3c206b0
commit 6ab220b3a8
11 changed files with 44 additions and 45 deletions

View File

@ -63,6 +63,7 @@ from nova.openstack.common import uuidutils
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
@ -312,6 +313,9 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def _fake_instance(self, uuid):
    """Build a minimal Instance object carrying the supplied uuid."""
    inst = objects.Instance(uuid=uuid, id=1)
    return inst
def test_same_fname_concurrency(self):
# Ensures that the same fname cache runs sequentially.
uuid = uuidutils.generate_uuid()
@ -320,8 +324,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname', None,
signal=sig1, wait=wait1, done=done1)
@ -332,8 +335,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname', None,
signal=sig2, wait=wait2, done=done2)
@ -360,8 +362,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
sig1 = eventlet.event.Event()
thr1 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname2', None,
signal=sig1, wait=wait1, done=done1)
@ -372,8 +373,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
wait2 = eventlet.event.Event()
done2 = eventlet.event.Event()
sig2 = eventlet.event.Event()
thr2 = eventlet.spawn(backend.image({'name': 'instance',
'uuid': uuid},
thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
'name').cache,
_concurrency, 'fname1', None,
signal=sig2, wait=wait2, done=done2)
@ -8263,8 +8263,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
                                  mock_unplug_vifs):
    """destroy() with destroy_disks=False must not touch the disk files.

    The driver reads instance fields via dot notation, so the test must
    pass a real Instance object rather than a bare dict.  The superseded
    dict-literal assignment (leftover from the conversion) was dead code
    and has been removed.
    """
    instance = fake_instance.fake_instance_obj(
        None, name='instancename', id=1,
        uuid='875a8070-d0b9-4949-8b31-104d125c9a64')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # destroy_disks=False: only the domain teardown paths should run.
    drvr.destroy(self.context, instance, [], None, False)

View File

@ -29,6 +29,7 @@ from oslo_utils import units
from nova import context
from nova import exception
from nova import keymgr
from nova import objects
from nova.openstack.common import imageutils
from nova.openstack.common import uuidutils
from nova import test
@ -57,8 +58,7 @@ class _ImageTestCase(object):
self.fixture.config(disable_process_locking=True,
group='oslo_concurrency')
self.flags(instances_path=self.INSTANCES_PATH)
self.INSTANCE = {'name': 'instance',
'uuid': uuidutils.generate_uuid()}
self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
self.INSTANCE['uuid'], 'disk.info')
self.NAME = 'fake.vm'
@ -1333,8 +1333,7 @@ class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
class BackendTestCase(test.NoDBTestCase):
INSTANCE = {'name': 'fake-instance',
'uuid': uuidutils.generate_uuid()}
INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
NAME = 'fake-name.suffix'
def setUp(self):

View File

@ -15,6 +15,7 @@ import mock
from oslo_log import log as logging
from nova import exception
from nova import objects
from nova import test
from nova import utils
from nova.virt.libvirt import rbd_utils
@ -271,7 +272,7 @@ class RbdTestCase(test.NoDBTestCase):
@mock.patch.object(rbd_utils, 'rados')
@mock.patch.object(rbd_utils, 'RADOSClient')
def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd):
instance = {'uuid': '12345'}
instance = objects.Instance(id=1, uuid='12345')
rbd = mock_rbd.RBD.return_value
rbd.list.return_value = ['12345_test', '111_test']

View File

@ -26,6 +26,7 @@ from nova import exception
from nova.openstack.common import fileutils
from nova.storage import linuxscsi
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
@ -723,7 +724,8 @@ disk size: 4.4M
self.assertEqual(out, 'c')
def test_get_instance_path_at_destination(self):
instance = dict(name='fake_inst', uuid='fake_uuid')
instance = fake_instance.fake_instance_obj(None, name='fake_inst',
uuid='fake_uuid')
migrate_data = None
inst_path_at_dest = libvirt_utils.get_instance_path_at_destination(

View File

@ -25,6 +25,7 @@ from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import fake_instance
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import vif
@ -302,10 +303,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
details = {network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client'}
)
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid'
}
instance = objects.Instance(id=1, uuid='instance-uuid')
bandwidth = {
'quota:vif_inbound_peak': '200',
@ -795,11 +793,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(utils, 'execute') as execute:
execute.side_effect = processutils.ProcessExecutionError
instance = {
'name': 'instance-name',
'uuid': 'instance-uuid',
'project_id': 'myproject'
}
instance = objects.Instance(id=1,
uuid='instance-uuid',
project_id='myproject')
d.plug_iovisor(instance, self.vif_ivs)
def test_unplug_mlnx_with_details(self):

View File

@ -473,7 +473,7 @@ def default_device_names(virt_type, context, instance, root_device_name,
def has_default_ephemeral(instance, disk_bus, block_device_info, mapping):
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
if instance['ephemeral_gb'] <= 0 or ephemerals:
if instance.ephemeral_gb <= 0 or ephemerals:
return None
else:
info = get_next_disk_info(mapping, disk_bus)

View File

@ -201,7 +201,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
filters added to the list must also be correctly defined
within the subclass.
"""
if pipelib.is_vpn_image(instance['image_ref']):
if pipelib.is_vpn_image(instance.image_ref):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
@ -283,8 +283,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
@staticmethod
def _instance_filter_name(instance, nic_id=None):
    """Return the nwfilter name for *instance*.

    Without a nic_id the per-instance filter name is returned; with a
    nic_id the per-NIC variant is returned.  Only the attribute-access
    (instance.name) returns are kept: the duplicated dict-access return
    lines left over from the conversion made these lines unreachable.
    """
    if not nic_id:
        return 'nova-instance-%s' % instance.name
    return 'nova-instance-%s-%s' % (instance.name, nic_id)
def instance_filter_exists(self, instance, network_info):
"""Check nova-instance-instance-xxx exists."""
@ -294,7 +294,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
try:
self._conn.nwfilterLookupByName(instance_filter_name)
except libvirt.libvirtError:
name = instance['name']
name = instance.name
LOG.debug('The nwfilter(%(instance_filter_name)s) for'
'%(name)s is not found.',
{'instance_filter_name': instance_filter_name,
@ -336,7 +336,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
def unfilter_instance(self, instance, network_info):
# NOTE(salvatore-orlando):
# Overriding base class method for applying nwfilter operation
if self.instance_info.pop(instance['id'], None):
if self.instance_info.pop(instance.id, None):
self.remove_filters_for_instance(instance)
self.iptables.apply()
self.nwfilter.unfilter_instance(instance, network_info)

View File

@ -545,7 +545,7 @@ class Lvm(Image):
' images_volume_group'
' flag to use LVM images.'))
self.vg = CONF.libvirt.images_volume_group
self.lv = '%s_%s' % (instance['uuid'],
self.lv = '%s_%s' % (instance.uuid,
self.escape(disk_name))
if self.ephemeral_key_uuid is None:
self.path = os.path.join('/dev', self.vg, self.lv)
@ -647,7 +647,7 @@ class Rbd(Image):
except IndexError:
raise exception.InvalidDevicePath(path=path)
else:
self.rbd_name = '%s_%s' % (instance['uuid'], disk_name)
self.rbd_name = '%s_%s' % (instance.uuid, disk_name)
if not CONF.libvirt.images_rbd_pool:
raise RuntimeError(_('You should specify'

View File

@ -257,7 +257,7 @@ class RBDDriver(object):
with RADOSClient(self, self.pool) as client:
def belongs_to_instance(disk):
return disk.startswith(instance['uuid'])
return disk.startswith(instance.uuid)
volumes = rbd.RBD().list(client.ioctx)
for volume in filter(belongs_to_instance, volumes):

View File

@ -514,15 +514,15 @@ def get_instance_path(instance, forceold=False, relative=False):
:returns: a path to store information about that instance
"""
pre_grizzly_name = os.path.join(CONF.instances_path, instance['name'])
pre_grizzly_name = os.path.join(CONF.instances_path, instance.name)
if forceold or os.path.exists(pre_grizzly_name):
if relative:
return instance['name']
return instance.name
return pre_grizzly_name
if relative:
return instance['uuid']
return os.path.join(CONF.instances_path, instance['uuid'])
return instance.uuid
return os.path.join(CONF.instances_path, instance.uuid)
def get_instance_path_at_destination(instance, migrate_data=None):

View File

@ -164,7 +164,7 @@ class LibvirtGenericVIFDriver(object):
self.get_vif_devname(vif))
mac_id = vif['address'].replace(':', '')
name = "nova-instance-" + instance['name'] + "-" + mac_id
name = "nova-instance-" + instance.name + "-" + mac_id
if self.get_firewall_required(vif):
conf.filtername = name
designer.set_vif_bandwidth_config(conf, inst_type)
@ -427,10 +427,10 @@ class LibvirtGenericVIFDriver(object):
if port == 'ovs':
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
v2_name, iface_id,
vif['address'], instance['uuid'])
vif['address'], instance.uuid)
elif port == 'ivs':
linux_net.create_ivs_vif_port(v2_name, iface_id,
vif['address'], instance['uuid'])
vif['address'], instance.uuid)
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
@ -453,7 +453,7 @@ class LibvirtGenericVIFDriver(object):
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
linux_net.create_ivs_vif_port(dev, iface_id, vif['address'],
instance['uuid'])
instance.uuid)
def plug_ivs_hybrid(self, instance, vif):
"""Plug using hybrid strategy (same as OVS)
@ -473,7 +473,7 @@ class LibvirtGenericVIFDriver(object):
def plug_mlnx_direct(self, instance, vif):
vnic_mac = vif['address']
device_id = instance['uuid']
device_id = instance.uuid
fabric = vif.get_physical_network()
if not fabric:
raise exception.NetworkMissingPhysicalNetwork(
@ -523,7 +523,7 @@ class LibvirtGenericVIFDriver(object):
iface_id = vif['id']
linux_net.create_tap_dev(dev)
net_id = vif['network']['id']
tenant_id = instance["project_id"]
tenant_id = instance.project_id
try:
utils.execute('ifc_ctl', 'gateway', 'add_port', dev,
run_as_root=True)
@ -545,7 +545,7 @@ class LibvirtGenericVIFDriver(object):
vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
port_name, iface_id, vif['address'],
instance['uuid'])
instance.uuid)
linux_net.ovs_set_vhostuser_port_type(port_name)
def plug_vrouter(self, instance, vif):