Consider network NUMA affinity for move operations
There are two things required here. First, we need to start (consistently) storing the physnet and tunneled status of the various networks an instance is attached to in 'Instance.info_cache.network_info'. Once we have this information, we can use it to populate 'RequestSpec.network_metadata', which can be consumed by later changes in the series.

Note that live migrations and evacuations with a forced destination host bypass the scheduler, which also means there will be no NUMA vswitch affinity "claims" on the destination host for those forced move operations. This is not a regression since (1) live migration is not NUMA affinity aware and does not perform claims anyway (see blueprint numa-aware-live-migration) and (2) a forced-host evacuation already does not perform claims on the compute node because the scheduler is bypassed, so the limits passed to the compute node and used for the claim are empty.

Part of blueprint numa-aware-vswitches

Change-Id: I393bd58b8fede38af98ded0c7be099ef22b6f75b
parent a0de99931b
commit 7aa9d1c23b

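For reviewers, the core of the change is the aggregation that RequestSpec.ensure_network_metadata() performs over the (now consistently populated) info cache. Below is a minimal, self-contained sketch of that aggregation in plain Python, using bare dicts in place of the nova network model objects; the helper name summarize_network_metadata is made up for illustration and is not part of this change.

    def summarize_network_metadata(network_info):
        # Mirror the aggregation done by RequestSpec.ensure_network_metadata():
        # collect every physnet seen across the instance's VIFs and record
        # whether any VIF is attached to a tunneled (e.g. VXLAN/GRE) network.
        physnets = set()
        tunneled = False
        for vif in network_info:
            meta = vif.get('network', {}).get('meta', {})
            # physical_network/tunneled may be missing for old instances whose
            # info_cache has not been healed yet, hence the defaults.
            physnet = meta.get('physical_network')
            if physnet:
                physnets.add(physnet)
            tunneled |= meta.get('tunneled', False)
        return physnets, tunneled

    # Example: two VIFs on provider network 'physnet0' plus one tunneled VIF.
    cache = [
        {'network': {'meta': {'physical_network': 'physnet0',
                              'tunneled': False}}},
        {'network': {'meta': {'physical_network': 'physnet0',
                              'tunneled': False}}},
        {'network': {'meta': {'physical_network': None, 'tunneled': True}}},
    ]
    print(summarize_network_metadata(cache))  # ({'physnet0'}, True)

The real implementation stores the result in an objects.NetworkMetadata object on the RequestSpec so that the scheduler-side changes later in the series can take physnet and tunnel affinity into account.
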
@@ -794,6 +794,7 @@ class ComputeTaskManager(base.Base):
                 cell=instance_mapping.cell_mapping))
 
         request_spec.ensure_project_and_user_id(instance)
+        request_spec.ensure_network_metadata(instance)
         compute_utils.heal_reqspec_is_bfv(
             context, request_spec, instance)
         host_lists = self._schedule_instances(context,

@@ -950,6 +951,7 @@ class ComputeTaskManager(base.Base):
                     instance,
                     image_ref)
                 request_spec.ensure_project_and_user_id(instance)
+                request_spec.ensure_network_metadata(instance)
                 compute_utils.heal_reqspec_is_bfv(
                     context, request_spec, instance)
                 host_lists = self._schedule_instances(context,

@@ -303,6 +303,7 @@ class LiveMigrationTask(base.TaskBase):
                 cell=cell_mapping)
 
         request_spec.ensure_project_and_user_id(self.instance)
+        request_spec.ensure_network_metadata(self.instance)
         compute_utils.heal_reqspec_is_bfv(
             self.context, request_spec, self.instance)
 

@@ -221,6 +221,7 @@ class MigrationTask(base.TaskBase):
         migration = self._preallocate_migration()
 
         self.request_spec.ensure_project_and_user_id(self.instance)
+        self.request_spec.ensure_network_metadata(self.instance)
         compute_utils.heal_reqspec_is_bfv(
             self.context, self.request_spec, self.instance)
         # On an initial call to migrate, 'self.host_list' will be None, so we

@@ -2589,7 +2589,9 @@ class API(base_api.NetworkAPI):
                              if fixed_ip.is_in_subnet(subnet)]
         return subnets
 
-    def _nw_info_build_network(self, port, networks, subnets):
+    def _nw_info_build_network(self, context, port, networks, subnets):
+        # TODO(stephenfin): Pass in an existing admin client if available.
+        neutron = get_client(context, admin=True)
         network_name = None
         network_mtu = None
         for net in networks:

@@ -2641,20 +2643,19 @@ class API(base_api.NetworkAPI):
         if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
             bridge = bridge[:network_model.NIC_NAME_LEN]
 
+        physnet, tunneled = self._get_physnet_tunneled_info(
+            context, neutron, port['network_id'])
         network = network_model.Network(
             id=port['network_id'],
             bridge=bridge,
             injected=CONF.flat_injected,
             label=network_name,
             tenant_id=tenant_id,
-            mtu=network_mtu
+            mtu=network_mtu,
+            physical_network=physnet,
+            tunneled=tunneled
         )
         network['subnets'] = subnets
-        port_profile = _get_binding_profile(port)
-        if port_profile:
-            physical_network = port_profile.get('physical_network')
-            if physical_network:
-                network['physical_network'] = physical_network
 
         if should_create_bridge is not None:
             network['should_create_bridge'] = should_create_bridge

@@ -2705,7 +2706,7 @@ class API(base_api.NetworkAPI):
                     devname = devname[:network_model.NIC_NAME_LEN]
 
                 network, ovs_interfaceid = (
-                    self._nw_info_build_network(current_neutron_port,
+                    self._nw_info_build_network(context, current_neutron_port,
                                                 networks, subnets))
                 preserve_on_delete = (current_neutron_port['id'] in
                                       preexisting_port_ids)

@@ -469,6 +469,26 @@ class RequestSpec(base.NovaObject):
         if 'user_id' not in self or self.user_id is None:
             self.user_id = instance.user_id
 
+    def ensure_network_metadata(self, instance):
+        if not (instance.info_cache and instance.info_cache.network_info):
+            return
+
+        physnets = set([])
+        tunneled = False
+
+        # physical_network and tunneled might not be in the cache for old
+        # instances that haven't had their info_cache healed yet
+        for vif in instance.info_cache.network_info:
+            physnet = vif.get('network', {}).get('meta', {}).get(
+                'physical_network', None)
+            if physnet:
+                physnets.add(physnet)
+            tunneled |= vif.get('network', {}).get('meta', {}).get(
+                'tunneled', False)
+
+        self.network_metadata = objects.NetworkMetadata(
+            physnets=physnets, tunneled=tunneled)
+
     @staticmethod
     def _from_db_object(context, spec, db_spec):
         spec_obj = spec.obj_from_primitive(jsonutils.loads(db_spec['spec']))

@@ -62,6 +62,10 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
         self.heal_reqspec_is_bfv_mock = _p.start()
         self.addCleanup(_p.stop)
 
+        _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata')
+        self.ensure_network_metadata_mock = _p.start()
+        self.addCleanup(_p.stop)
+
     def _generate_task(self):
         self.task = live_migrate.LiveMigrationTask(self.context,
             self.instance, self.destination, self.block_migration,

@@ -127,6 +131,11 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
         # heal the request spec.
         self.heal_reqspec_is_bfv_mock.assert_not_called()
 
+        # When the task is executed with a destination it means the host is
+        # being forced and we don't call the scheduler, so we don't need to
+        # modify the request spec
+        self.ensure_network_metadata_mock.assert_not_called()
+
     def test_execute_with_destination_old_school(self):
         self.test_execute_with_destination(new_mode=False)
 

@@ -349,6 +358,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
 
         mock_setup.assert_called_once_with(self.context, self.fake_spec)
         mock_reset.assert_called_once_with()
+        self.ensure_network_metadata_mock.assert_called_once_with(
+            self.instance)
         self.heal_reqspec_is_bfv_mock.assert_called_once_with(
             self.context, self.fake_spec, self.instance)
         mock_select.assert_called_once_with(self.context, self.fake_spec,

@@ -383,6 +394,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
 
         get_image.assert_called_once_with(self.instance.system_metadata)
         setup_ig.assert_called_once_with(self.context, another_spec)
+        self.ensure_network_metadata_mock.assert_called_once_with(
+            self.instance)
         self.heal_reqspec_is_bfv_mock.assert_called_once_with(
             self.context, another_spec, self.instance)
         select_dest.assert_called_once_with(self.context, another_spec,

@@ -50,10 +50,15 @@ class MigrationTaskTestCase(test.NoDBTestCase):
                                   'hosts': [['host1', 'node1']]}}
         self.reservations = []
         self.clean_shutdown = True
+
         _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
         self.heal_reqspec_is_bfv_mock = _p.start()
         self.addCleanup(_p.stop)
 
+        _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata')
+        self.ensure_network_metadata_mock = _p.start()
+        self.addCleanup(_p.stop)
+
     def _generate_task(self):
         return migrate.MigrationTask(self.context, self.instance, self.flavor,
                                      self.request_spec,

@@ -134,6 +139,8 @@ class MigrationTaskTestCase(test.NoDBTestCase):
 
         task.execute()
 
+        self.ensure_network_metadata_mock.assert_called_once_with(
+            self.instance)
         self.heal_reqspec_is_bfv_mock.assert_called_once_with(
             self.context, self.request_spec, self.instance)
         sig_mock.assert_called_once_with(self.context, self.request_spec)

@@ -347,6 +347,10 @@ class _BaseTaskTestCase(object):
         self.heal_reqspec_is_bfv_mock = _p.start()
         self.addCleanup(_p.stop)
 
+        _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata')
+        self.ensure_network_metadata_mock = _p.start()
+        self.addCleanup(_p.stop)
+
     def _prepare_rebuild_args(self, update_args=None):
         # Args that don't get passed in to the method but do get passed to RPC
         migration = update_args and update_args.pop('migration', None)

@@ -1032,6 +1036,8 @@ class _BaseTaskTestCase(object):
         # ComputeTaskManager.
         if isinstance(self.conductor,
                       conductor_manager.ComputeTaskManager):
+            self.ensure_network_metadata_mock.assert_called_once_with(
+                test.MatchType(objects.Instance))
             self.heal_reqspec_is_bfv_mock.assert_called_once_with(
                 self.context, fake_spec, instance)
             sched_instances.assert_called_once_with(

@@ -1042,6 +1048,8 @@ class _BaseTaskTestCase(object):
         else:
             # RPC API tests won't have the same request spec or instance
             # since they go over the wire.
+            self.ensure_network_metadata_mock.assert_called_once_with(
+                test.MatchType(objects.Instance))
             self.heal_reqspec_is_bfv_mock.assert_called_once_with(
                 self.context, test.MatchType(objects.RequestSpec),
                 test.MatchType(objects.Instance))

@@ -1299,6 +1307,8 @@ class _BaseTaskTestCase(object):
             obj_base.obj_to_primitive(inst_obj.image_meta), [inst_obj])
         fp_mock.assert_called_once_with(self.context, request_spec,
                                         filter_properties)
+        self.ensure_network_metadata_mock.assert_called_once_with(
+            inst_obj)
         self.heal_reqspec_is_bfv_mock.assert_called_once_with(
             self.context, fake_spec, inst_obj)
         select_dest_mock.assert_called_once_with(self.context, fake_spec,

@@ -766,6 +766,12 @@ class TestNeutronv2(TestNeutronv2Base):
             self.instance['uuid'], mox.IgnoreArg()).AndReturn(fake_info_cache)
         neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
             self.moxed_client)
+        neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(
+            mox.IgnoreArg(), mox.IgnoreArg(),
+            self.port_data1[0]['network_id']).AndReturn((None, False))
         self.moxed_client.list_ports(
             tenant_id=self.instance['project_id'],
             device_id=self.instance['uuid']).AndReturn(

@@ -1400,6 +1406,13 @@ class TestNeutronv2(TestNeutronv2Base):
         net_ids = [port['network_id'] for port in port_data]
         neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
             self.moxed_client)
+        if number == 2:
+            neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+                self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(
+            mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(
+            (None, False))
         self.moxed_client.list_networks(id=net_ids).AndReturn(
             {'networks': nets})
         float_data = number == 1 and self.float_data1 or self.float_data2

@@ -2433,10 +2446,15 @@ class TestNeutronv2(TestNeutronv2Base):
                       'mtu': 9000}]
         api = neutronapi.API()
         neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
+        neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(self.context, self.moxed_client,
+                                       'net-id').AndReturn((None, False))
         self.mox.ReplayAll()
         neutronapi.get_client(uuids.fake)
-        net, iid = api._nw_info_build_network(fake_port, fake_nets,
-                                              fake_subnets)
+        net, iid = api._nw_info_build_network(self.context, fake_port,
+                                              fake_nets, fake_subnets)
         self.assertEqual(fake_subnets, net['subnets'])
         self.assertEqual('net-id', net['id'])
         self.assertEqual('foo', net['label'])

@@ -2488,10 +2506,15 @@ class TestNeutronv2(TestNeutronv2Base):
         fake_nets = [{'id': 'net-id2', 'name': 'foo', 'tenant_id': 'tenant'}]
         api = neutronapi.API()
         neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
+        neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(self.context, self.moxed_client,
+                                       'net-id1').AndReturn((None, False))
         self.mox.ReplayAll()
         neutronapi.get_client(uuids.fake)
-        net, iid = api._nw_info_build_network(fake_port, fake_nets,
-                                              fake_subnets)
+        net, iid = api._nw_info_build_network(self.context, fake_port,
+                                              fake_nets, fake_subnets)
         self.assertEqual(fake_subnets, net['subnets'])
         self.assertEqual('net-id1', net['id'])
         self.assertEqual('tenant', net['meta']['tenant_id'])

@@ -2510,10 +2533,15 @@ class TestNeutronv2(TestNeutronv2Base):
         fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
         api = neutronapi.API()
         neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
+        neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(mox.IgnoreArg(), mox.IgnoreArg(),
+                                       'net-id').AndReturn((None, False))
         self.mox.ReplayAll()
         neutronapi.get_client(uuids.fake)
-        net, iid = api._nw_info_build_network(fake_port, fake_nets,
-                                              fake_subnets)
+        net, iid = api._nw_info_build_network(self.context, fake_port,
+                                              fake_nets, fake_subnets)
         self.assertEqual(fake_subnets, net['subnets'])
         self.assertEqual('net-id', net['id'])
         self.assertEqual('foo', net['label'])

@@ -2538,10 +2566,15 @@ class TestNeutronv2(TestNeutronv2Base):
         fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
         api = neutronapi.API()
         neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
+        neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(mox.IgnoreArg(), mox.IgnoreArg(),
+                                       'net-id').AndReturn((None, False))
         self.mox.ReplayAll()
         neutronapi.get_client(uuids.fake)
         net, ovs_interfaceid = api._nw_info_build_network(
-            fake_port, fake_nets, fake_subnets)
+            self.context, fake_port, fake_nets, fake_subnets)
         self.assertEqual(fake_subnets, net['subnets'])
         self.assertEqual('net-id', net['id'])
         self.assertEqual('foo', net['label'])

@@ -2565,10 +2598,15 @@ class TestNeutronv2(TestNeutronv2Base):
         fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
         api = neutronapi.API()
         neutronapi.get_client(mox.IgnoreArg()).AndReturn(self.moxed_client)
+        neutronapi.get_client(mox.IgnoreArg(), admin=True).AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(mox.IgnoreArg(), mox.IgnoreArg(),
+                                       'net-id').AndReturn((None, False))
         self.mox.ReplayAll()
         neutronapi.get_client(uuids.fake)
-        net, iid = api._nw_info_build_network(fake_port, fake_nets,
-                                              fake_subnets)
+        net, iid = api._nw_info_build_network(self.context, fake_port,
+                                              fake_nets, fake_subnets)
         self.assertNotEqual(CONF.neutron.ovs_bridge, net['bridge'])
         self.assertEqual('custom-bridge', net['bridge'])
 

@@ -2701,6 +2739,16 @@ class TestNeutronv2(TestNeutronv2Base):
 
         self.mox.StubOutWithMock(api, '_get_preexisting_port_ids')
         api._get_preexisting_port_ids(fake_inst).AndReturn(['port5'])
+
+        neutronapi.get_client(mox.IgnoreArg(),
+                              admin=True).MultipleTimes().AndReturn(
+            self.moxed_client)
+        self.mox.StubOutWithMock(api, '_get_physnet_tunneled_info')
+        api._get_physnet_tunneled_info(
+            mox.IgnoreArg(), mox.IgnoreArg(),
+            mox.IgnoreArg()).MultipleTimes().AndReturn(
+            (None, False))
+
         self.mox.ReplayAll()
         fake_inst.info_cache = objects.InstanceInfoCache.new(
             self.context, uuids.instance)

@@ -3068,7 +3116,7 @@ class TestNeutronv2WithMock(_TestNeutronv2Common):
 
         nw_inf = self.api.get_instance_nw_info(self.context, instance)
 
-        mock_get_client.assert_called_once_with(mock.ANY, admin=True)
+        mock_get_client.assert_any_call(mock.ANY, admin=True)
         mock_cache_update.assert_called_once_with(
             mock.ANY, self.instance['uuid'], mock.ANY)
         mock_cache_get.assert_called_once_with(mock.ANY, self.instance['uuid'])

@@ -3311,7 +3359,7 @@ class TestNeutronv2WithMock(_TestNeutronv2Common):
             self.assertEqual(port_ids[iface_index],
                              nw_infs[iface_index]['id'])
 
-        mock_get_client.assert_called_once_with(mock.ANY, admin=True)
+        mock_get_client.assert_any_call(mock.ANY, admin=True)
         mock_cache_update.assert_called_once_with(
             mock.ANY, self.instance['uuid'], mock.ANY)
         mock_cache_get.assert_called_once_with(mock.ANY, self.instance['uuid'])

@@ -19,12 +19,14 @@ from oslo_versionedobjects import base as ovo_base
 
 from nova import context
 from nova import exception
+from nova.network import model as network_model
 from nova import objects
 from nova.objects import base
 from nova.objects import request_spec
 from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit import fake_flavor
 from nova.tests.unit import fake_instance
+from nova.tests.unit import fake_network_cache_model
 from nova.tests.unit import fake_request_spec
 from nova.tests.unit.objects import test_objects
 from nova.tests import uuidsentinel as uuids

@@ -500,6 +502,47 @@ class _TestRequestSpecObject(object):
         spec = objects.RequestSpec()
         self.assertEqual({}, spec.to_legacy_filter_properties_dict())
 
+    def test_ensure_network_metadata(self):
+        network_a = fake_network_cache_model.new_network({
+            'physical_network': 'foo', 'tunneled': False})
+        vif_a = fake_network_cache_model.new_vif({'network': network_a})
+        network_b = fake_network_cache_model.new_network({
+            'physical_network': 'foo', 'tunneled': False})
+        vif_b = fake_network_cache_model.new_vif({'network': network_b})
+        network_c = fake_network_cache_model.new_network({
+            'physical_network': 'bar', 'tunneled': False})
+        vif_c = fake_network_cache_model.new_vif({'network': network_c})
+        network_d = fake_network_cache_model.new_network({
+            'physical_network': None, 'tunneled': True})
+        vif_d = fake_network_cache_model.new_vif({'network': network_d})
+        nw_info = network_model.NetworkInfo([vif_a, vif_b, vif_c, vif_d])
+        info_cache = objects.InstanceInfoCache(network_info=nw_info,
+                                               instance_uuid=uuids.instance)
+        instance = objects.Instance(id=3, uuid=uuids.instance,
+                                    info_cache=info_cache)
+
+        spec = objects.RequestSpec()
+        self.assertNotIn('network_metadata', spec)
+
+        spec.ensure_network_metadata(instance)
+        self.assertIn('network_metadata', spec)
+        self.assertIsInstance(spec.network_metadata, objects.NetworkMetadata)
+        self.assertEqual(spec.network_metadata.physnets, set(['foo', 'bar']))
+        self.assertTrue(spec.network_metadata.tunneled)
+
+    def test_ensure_network_metadata_missing(self):
+        nw_info = network_model.NetworkInfo([])
+        info_cache = objects.InstanceInfoCache(network_info=nw_info,
+                                               instance_uuid=uuids.instance)
+        instance = objects.Instance(id=3, uuid=uuids.instance,
+                                    info_cache=info_cache)
+
+        spec = objects.RequestSpec()
+        self.assertNotIn('network_metadata', spec)
+
+        spec.ensure_network_metadata(instance)
+        self.assertNotIn('network_metadata', spec)
+
     @mock.patch.object(request_spec.RequestSpec,
                        '_get_by_instance_uuid_from_db')
     @mock.patch('nova.objects.InstanceGroup.get_by_uuid')