xenapi: pass network_info to generate_configdrive
Currently, network_info is being fetched again when generating the config drive. This change ensures the network_info that is already available is re-used when generating the config drive. In addition, it is more consistent to use the network_info that is passed into this function, rather than potentially lazy-loading the whole info_cache of the instance from the DB, which may not yet be populated when we are generating the metadata for the config drive anyway. Fixes bug 1224508 Change-Id: I3e6f5a987593ecd422be61ac50c0d74463ba25d5
This commit is contained in:
committed by
Gerrit Code Review
parent
0db710d860
commit
137a1cd9c1
@@ -142,6 +142,9 @@ def image_ec2_id(image_id, image_type='ami'):
|
||||
|
||||
|
||||
def get_ip_info_for_instance_from_nw_info(nw_info):
|
||||
if not isinstance(nw_info, network_model.NetworkInfo):
|
||||
nw_info = network_model.NetworkInfo.hydrate(nw_info)
|
||||
|
||||
ip_info = {}
|
||||
fixed_ips = nw_info.fixed_ips()
|
||||
ip_info['fixed_ips'] = [ip['address'] for ip in fixed_ips
|
||||
@@ -165,8 +168,6 @@ def get_ip_info_for_instance(context, instance):
|
||||
# Make sure empty response is turned into the model
|
||||
if not nw_info:
|
||||
nw_info = []
|
||||
if not isinstance(nw_info, network_model.NetworkInfo):
|
||||
nw_info = network_model.NetworkInfo.hydrate(nw_info)
|
||||
return get_ip_info_for_instance_from_nw_info(nw_info)
|
||||
|
||||
|
||||
|
||||
@@ -122,8 +122,6 @@ class InstanceMetadata():
|
||||
self.availability_zone = ec2utils.get_availability_zone_by_host(
|
||||
instance['host'], capi)
|
||||
|
||||
self.ip_info = ec2utils.get_ip_info_for_instance(ctxt, instance)
|
||||
|
||||
self.security_groups = capi.security_group_get_by_instance(ctxt,
|
||||
instance)
|
||||
|
||||
@@ -153,6 +151,9 @@ class InstanceMetadata():
|
||||
network_info = network.API().get_instance_nw_info(ctxt,
|
||||
instance)
|
||||
|
||||
self.ip_info = \
|
||||
ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
|
||||
|
||||
self.network_config = None
|
||||
cfg = netutils.get_injected_network_template(network_info)
|
||||
|
||||
|
||||
@@ -90,7 +90,7 @@ def return_non_existing_address(*args, **kwarg):
|
||||
|
||||
def fake_InstanceMetadata(stubs, inst_data, address=None,
|
||||
sgroups=None, content=[], extra_md={},
|
||||
vd_driver=None):
|
||||
vd_driver=None, network_info=None):
|
||||
|
||||
if sgroups is None:
|
||||
sgroups = [{'name': 'default'}]
|
||||
@@ -101,7 +101,7 @@ def fake_InstanceMetadata(stubs, inst_data, address=None,
|
||||
stubs.Set(api, 'security_group_get_by_instance', sg_get)
|
||||
return base.InstanceMetadata(inst_data, address=address,
|
||||
content=content, extra_md=extra_md,
|
||||
vd_driver=vd_driver)
|
||||
vd_driver=vd_driver, network_info=network_info)
|
||||
|
||||
|
||||
def fake_request(stubs, mdinst, relpath, address="127.0.0.1",
|
||||
@@ -276,7 +276,7 @@ class MetadataTestCase(test.TestCase):
|
||||
self.assertTrue(md._check_version('2009-04-04', '2009-04-04'))
|
||||
|
||||
def test_InstanceMetadata_uses_passed_network_info(self):
|
||||
network_info = {"a": "b"}
|
||||
network_info = []
|
||||
|
||||
self.mox.StubOutWithMock(netutils, "get_injected_network_template")
|
||||
netutils.get_injected_network_template(network_info).AndReturn(False)
|
||||
@@ -291,7 +291,7 @@ class MetadataTestCase(test.TestCase):
|
||||
self.assertIsNotNone(path)
|
||||
|
||||
def test_InstanceMetadata_queries_network_API_when_needed(self):
|
||||
network_info_from_api = {"c": "d"}
|
||||
network_info_from_api = []
|
||||
|
||||
self.mox.StubOutWithMock(network_api.API, "get_instance_nw_info")
|
||||
|
||||
|
||||
@@ -151,10 +151,11 @@ class GenerateConfigDriveTestCase(test.NoDBTestCase):
|
||||
contextified('mounted_dev'))
|
||||
|
||||
class FakeInstanceMetadata(object):
|
||||
def __init__(self, instance, content=None, extra_md=None):
|
||||
pass
|
||||
def __init__(_self, instance, content=None, extra_md=None,
|
||||
network_info=None):
|
||||
self.assertEqual(network_info, "nw_info")
|
||||
|
||||
def metadata_for_config_drive(self):
|
||||
def metadata_for_config_drive(_self):
|
||||
return []
|
||||
|
||||
self.useFixture(fixtures.MonkeyPatch(
|
||||
@@ -178,7 +179,7 @@ class GenerateConfigDriveTestCase(test.NoDBTestCase):
|
||||
|
||||
# And the actual call we're testing
|
||||
vm_utils.generate_configdrive('session', instance, 'vm_ref',
|
||||
'userdevice')
|
||||
'userdevice', "nw_info")
|
||||
|
||||
|
||||
class XenAPIGetUUID(test.NoDBTestCase):
|
||||
|
||||
@@ -363,7 +363,7 @@ class SpawnTestCase(VMOpsTestBase):
|
||||
self.vmops._update_instance_progress(context, instance, step, steps)
|
||||
|
||||
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
|
||||
admin_password, injected_files)
|
||||
network_info, admin_password, injected_files)
|
||||
step += 1
|
||||
self.vmops._update_instance_progress(context, instance, step, steps)
|
||||
|
||||
@@ -468,7 +468,7 @@ class SpawnTestCase(VMOpsTestBase):
|
||||
if resize_instance:
|
||||
self.vmops._resize_up_root_vdi(instance, root_vdi)
|
||||
self.vmops._attach_disks(instance, vm_ref, name_label, vdis, di_type,
|
||||
None, None)
|
||||
network_info, None, None)
|
||||
self.vmops._attach_mapped_block_devices(instance, block_device_info)
|
||||
|
||||
self.vmops._inject_instance_metadata(instance, vm_ref)
|
||||
|
||||
@@ -2287,7 +2287,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
|
||||
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
|
||||
|
||||
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
|
||||
vdis, disk_image_type)
|
||||
vdis, disk_image_type, "fake_nw_inf")
|
||||
|
||||
self.assertEqual(marker["partition_called"], called)
|
||||
|
||||
@@ -2379,7 +2379,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
|
||||
|
||||
self.called = False
|
||||
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
|
||||
vdis, disk_image_type)
|
||||
vdis, disk_image_type, "fake_nw_inf")
|
||||
self.assertTrue(self.called)
|
||||
|
||||
def test_generate_swap(self):
|
||||
|
||||
@@ -987,7 +987,7 @@ def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice,
|
||||
|
||||
|
||||
def generate_configdrive(session, instance, vm_ref, userdevice,
|
||||
admin_password=None, files=None):
|
||||
network_info, admin_password=None, files=None):
|
||||
sr_ref = safe_find_sr(session)
|
||||
vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
|
||||
'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
|
||||
@@ -998,8 +998,8 @@ def generate_configdrive(session, instance, vm_ref, userdevice,
|
||||
if admin_password:
|
||||
extra_md['admin_pass'] = admin_password
|
||||
inst_md = instance_metadata.InstanceMetadata(instance,
|
||||
content=files,
|
||||
extra_md=extra_md)
|
||||
content=files, extra_md=extra_md,
|
||||
network_info=network_info)
|
||||
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
|
||||
with utils.tempdir() as tmp_path:
|
||||
tmp_file = os.path.join(tmp_path, 'configdrive')
|
||||
|
||||
@@ -418,7 +418,7 @@ class VMOps(object):
|
||||
self._resize_up_root_vdi(instance, root_vdi)
|
||||
|
||||
self._attach_disks(instance, vm_ref, name_label, vdis,
|
||||
disk_image_type, admin_password,
|
||||
disk_image_type, network_info, admin_password,
|
||||
injected_files)
|
||||
if not first_boot:
|
||||
self._attach_mapped_block_devices(instance,
|
||||
@@ -560,7 +560,8 @@ class VMOps(object):
|
||||
return vm_mode.HVM
|
||||
|
||||
def _attach_disks(self, instance, vm_ref, name_label, vdis,
|
||||
disk_image_type, admin_password=None, files=None):
|
||||
disk_image_type, network_info,
|
||||
admin_password=None, files=None):
|
||||
ctx = nova_context.get_admin_context()
|
||||
instance_type = flavors.extract_flavor(instance)
|
||||
|
||||
@@ -619,6 +620,7 @@ class VMOps(object):
|
||||
if configdrive.required_by(instance):
|
||||
vm_utils.generate_configdrive(self._session, instance, vm_ref,
|
||||
DEVICE_CONFIGDRIVE,
|
||||
network_info,
|
||||
admin_password=admin_password,
|
||||
files=files)
|
||||
|
||||
|
||||
Reference in New Issue
Block a user