From befa4a61b03cd589ab9a616d3b5c265d8b17f524 Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Fri, 9 Nov 2012 17:13:20 -0600
Subject: [PATCH] Use connection_info on resize

During finish_migration the manager calls initialize_connection but
doesn't update the block_device_mapping with the (potentially new)
connection_info returned. This can cause the bdm to get out of sync,
the volume to fail to attach, and the resize to fail.

Fixes bug #1076801

Change-Id: Ie49ccd2138905e178843b375a9b16c3fe572d1db
---
 nova/compute/manager.py            |  58 ++++++------
 nova/tests/compute/test_compute.py | 142 +++++++++++++++++++++++++++++
 2 files changed, 173 insertions(+), 27 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 8927e682aef2..3cf62bc3682d 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1860,6 +1860,28 @@ class ComputeManager(manager.SchedulerDependentManager):
             migration, migration['source_compute'],
             reservations)

+    def _refresh_block_device_connection_info(self, context, instance):
+        """After an operation such as a resize, the IQN or CHAP credentials
+        may have changed; update the DB with the latest connection info.
+        """
+        bdms = self._get_instance_volume_bdms(context, instance)
+
+        if not bdms:
+            return bdms
+
+        connector = self.driver.get_volume_connector(instance)
+
+        for bdm in bdms:
+            volume = self.volume_api.get(context, bdm['volume_id'])
+            cinfo = self.volume_api.initialize_connection(
+                context, volume, connector)
+
+            self.conductor_api.block_device_mapping_update(
+                context, bdm['id'],
+                {'connection_info': jsonutils.dumps(cinfo)})
+
+        return bdms
+
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @reverts_task_state
     @wrap_instance_event
@@ -1901,15 +1923,11 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.network_api.setup_networks_on_host(context, instance,
                                                 migration['source_compute'])

-        bdms = self._get_instance_volume_bdms(context, instance)
-        block_device_info = self._get_instance_volume_block_device_info(
+        bdms = self._refresh_block_device_connection_info(
             context, instance)
-        if bdms:
-            connector = self.driver.get_volume_connector(instance)
-            for bdm in bdms:
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.initialize_connection(context, volume,
-                                                      connector)
+
+        block_device_info = self._get_instance_volume_block_device_info(
+            context, instance, bdms=bdms)

         self.driver.finish_revert_migration(instance,
                                    self._legacy_nw_info(network_info),
@@ -2171,17 +2189,11 @@ class ComputeManager(manager.SchedulerDependentManager):
             context, instance, "finish_resize.start",
             network_info=network_info)

-        bdms = self._get_instance_volume_bdms(context, instance)
+        bdms = self._refresh_block_device_connection_info(context, instance)
+
         block_device_info = self._get_instance_volume_block_device_info(
             context, instance, bdms=bdms)

-        if bdms:
-            connector = self.driver.get_volume_connector(instance)
-            for bdm in bdms:
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                self.volume_api.initialize_connection(context, volume,
-                                                      connector)
-
         self.driver.finish_migration(context, migration, instance,
                                      disk_info,
                                      self._legacy_nw_info(network_info),
@@ -2748,18 +2760,10 @@ class ComputeManager(manager.SchedulerDependentManager):
         required for live migration without shared storage.

         """
-        # If any volume is mounted, prepare here.
-        block_device_info = self._get_instance_volume_block_device_info(
-            context, instance)
-        if not block_device_info['block_device_mapping']:
-            LOG.info(_('Instance has no volume.'), instance=instance)
+        bdms = self._refresh_block_device_connection_info(context, instance)

-        # assign the volume to host system
-        # needed by the lefthand volume driver and maybe others
-        connector = self.driver.get_volume_connector(instance)
-        for bdm in self._get_instance_volume_bdms(context, instance):
-            volume = self.volume_api.get(context, bdm['volume_id'])
-            self.volume_api.initialize_connection(context, volume, connector)
+        block_device_info = self._get_instance_volume_block_device_info(
+            context, instance, bdms=bdms)

         network_info = self._get_instance_nw_info(context, instance)

diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 12bd3cf19d29..f85142d67585 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -1919,6 +1919,148 @@ class ComputeTestCase(BaseTestCase):
                                        reservations=reservations)
         self.compute.terminate_instance(self.context, instance=instance)

+    def test_finish_resize_with_volumes(self):
+        """Contrived test to ensure finish_resize doesn't raise anything."""
+
+        # create instance
+        instance = jsonutils.to_primitive(self._create_fake_instance())
+
+        # create volume
+        volume_id = 'fake'
+        volume = {'instance_uuid': None,
+                  'device_name': None,
+                  'volume_id': volume_id}
+
+        # stub out volume attach
+        def fake_volume_get(self, context, volume):
+            return volume
+        self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+        orig_connection_data = {
+            'target_discovered': True,
+            'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
+            'target_portal': '127.0.0.1:3260',
+            'volume_id': volume_id,
+        }
+        connection_info = {
+            'driver_volume_type': 'iscsi',
+            'data': orig_connection_data,
+        }
+
+        def fake_init_conn(self, context, volume, connector):
+            return connection_info
+        self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
+
+        def fake_attach(self, context, volume_id, instance_uuid, device_name):
+            volume['instance_uuid'] = instance_uuid
+            volume['device_name'] = device_name
+        self.stubs.Set(cinder.API, "attach", fake_attach)
+
+        # stub out virt driver attach
+        def fake_get_volume_connector(*args, **kwargs):
+            return {}
+        self.stubs.Set(self.compute.driver, 'get_volume_connector',
+                       fake_get_volume_connector)
+
+        def fake_attach_volume(*args, **kwargs):
+            pass
+        self.stubs.Set(self.compute.driver, 'attach_volume',
+                       fake_attach_volume)
+
+        # attach volume to instance
+        self.compute.attach_volume(self.context, volume['volume_id'],
+                                   '/dev/vdc', instance)
+
+        # assert volume attached correctly
+        self.assertEquals(volume['device_name'], '/dev/vdc')
+        disk_info = db.block_device_mapping_get_all_by_instance(
+            self.context, instance['uuid'])
+        self.assertEquals(len(disk_info), 1)
+        for bdm in disk_info:
+            self.assertEquals(bdm['device_name'], volume['device_name'])
+            self.assertEquals(bdm['connection_info'],
+                              jsonutils.dumps(connection_info))
+
+        # begin resize
+        instance_type = instance_types.get_default_instance_type()
+        db.instance_update(self.context, instance["uuid"],
+                           {"task_state": task_states.RESIZE_PREP})
+        self.compute.prep_resize(self.context, instance=instance,
+                                 instance_type=instance_type,
+                                 image={})
+
+        # NOTE(sirp): `prep_resize` mutates the `system_metadata` attribute
+        # in the DB but not on the instance passed in, so to sync the two,
+        # we need to refetch the row from the DB
+        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
+
+        migration_ref = db.migration_get_by_instance_and_status(
+            self.context.elevated(), instance['uuid'], 'pre-migrating')
+
+        # fake out detach for prep_resize (and later terminate)
+        def fake_terminate_connection(self, context, volume, connector):
+            connection_info['data'] = None
+        self.stubs.Set(cinder.API, "terminate_connection",
+                       fake_terminate_connection)
+
+        self.compute.resize_instance(self.context, instance=instance,
+                migration=migration_ref, image={},
+                instance_type=jsonutils.to_primitive(instance_type))
+
+        # assert bdm is unchanged
+        disk_info = db.block_device_mapping_get_all_by_instance(
+            self.context, instance['uuid'])
+        self.assertEquals(len(disk_info), 1)
+        for bdm in disk_info:
+            self.assertEquals(bdm['device_name'], volume['device_name'])
+            cached_connection_info = jsonutils.loads(bdm['connection_info'])
+            self.assertEquals(cached_connection_info['data'],
+                              orig_connection_data)
+        # but connection was terminated
+        self.assertEquals(connection_info['data'], None)
+
+        # stub out virt driver finish_migration
+        def fake(*args, **kwargs):
+            pass
+        self.stubs.Set(self.compute.driver, 'finish_migration', fake)
+
+        db.instance_update(self.context, instance["uuid"],
+                           {"task_state": task_states.RESIZE_MIGRATED})
+
+        reservations = self._ensure_quota_reservations_committed()
+
+        # new initialize connection
+        new_connection_data = dict(orig_connection_data)
+        new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id
+        new_connection_data['target_iqn'] = new_iqn
+
+        def fake_init_conn(self, context, volume, connector):
+            connection_info['data'] = new_connection_data
+            return connection_info
+        self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
+
+        self.compute.finish_resize(self.context,
+                migration=jsonutils.to_primitive(migration_ref),
+                disk_info={}, image={}, instance=instance,
+                reservations=reservations)
+
+        # assert volume attached correctly
+        disk_info = db.block_device_mapping_get_all_by_instance(
+            self.context, instance['uuid'])
+        self.assertEquals(len(disk_info), 1)
+        for bdm in disk_info:
+            self.assertEquals(bdm['connection_info'],
+                              jsonutils.dumps(connection_info))
+
+        # stub out detach
+        def fake_detach(self, context, volume_uuid):
+            volume['device_name'] = None
+            volume['instance_uuid'] = None
+        self.stubs.Set(cinder.API, "detach", fake_detach)
+
+        # clean up
+        self.compute.terminate_instance(self.context, instance=instance)
+
     def test_finish_resize_handles_error(self):
         # Make sure we don't leave the instance in RESIZE on error.
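
A note for reviewers: the fix boils down to persisting whatever
initialize_connection returns, since the IQN or CHAP credentials handed back
may differ from what is cached in the block_device_mapping. Below is a
minimal standalone sketch of that refresh-and-persist pattern;
FakeVolumeAPI, FakeBDMStore, and refresh_connection_info are illustrative
stand-ins for the sketch, not Nova's real interfaces.

import json


class FakeVolumeAPI(object):
    """Stand-in volume service: each initialize_connection call may hand
    back different details (e.g. a new target IQN), which is exactly why
    the result has to be written back to the BDM."""

    def __init__(self):
        self.generation = 0

    def initialize_connection(self, volume_id, connector):
        self.generation += 1
        return {'driver_volume_type': 'iscsi',
                'data': {'volume_id': volume_id,
                         'target_iqn': 'iqn.2010-10.org.openstack:%s.%d'
                                       % (volume_id, self.generation)}}


class FakeBDMStore(object):
    """Stand-in for the block_device_mapping table."""

    def __init__(self, rows):
        self.rows = dict((row['id'], row) for row in rows)

    def update(self, bdm_id, values):
        self.rows[bdm_id].update(values)


def refresh_connection_info(volume_api, bdm_store, connector, bdms):
    # Re-ask the volume service for connection details and persist the
    # fresh result on every block device mapping row, instead of calling
    # initialize_connection and dropping the return value.
    for bdm in bdms:
        cinfo = volume_api.initialize_connection(bdm['volume_id'], connector)
        bdm_store.update(bdm['id'], {'connection_info': json.dumps(cinfo)})
    return bdms


if __name__ == '__main__':
    rows = [{'id': 1, 'volume_id': 'fake', 'connection_info': None}]
    api, store = FakeVolumeAPI(), FakeBDMStore(rows)
    refresh_connection_info(api, store, connector={}, bdms=rows)
    print(store.rows[1]['connection_info'])  # reflects the latest IQN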