Merge "Use connection_info on resize"

Jenkins authored 2013-02-16 08:58:47 +00:00; committed by Gerrit Code Review
commit dc5a994e33
2 changed files with 173 additions and 27 deletions


@@ -1861,6 +1861,28 @@ class ComputeManager(manager.SchedulerDependentManager):
migration, migration['source_compute'],
reservations)
def _refresh_block_device_connection_info(self, context, instance):
"""After some operations, the IQN or CHAP, for example, may have
changed. This call updates the DB with the latest connection info.
"""
bdms = self._get_instance_volume_bdms(context, instance)
if not bdms:
return bdms
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
cinfo = self.volume_api.initialize_connection(
context, volume, connector)
self.conductor_api.block_device_mapping_update(
context, bdm['id'],
{'connection_info': jsonutils.dumps(cinfo)})
return bdms
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@reverts_task_state
@wrap_instance_event
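As a reviewer aid, here is a minimal standalone sketch of the pattern the new helper applies: ask the volume service for fresh connection details and persist the serialized result on each BDM row, so later driver calls see the current target rather than a stale cached one. FakeVolumeAPI, refresh_connection_info and bdm_store are illustrative stand-ins, not Nova code; the real helper goes through cinder and the conductor API.

import json


class FakeVolumeAPI(object):
    """Stand-in volume service: hands out fresh iSCSI details per call."""

    def __init__(self):
        self._calls = 0

    def initialize_connection(self, volume_id, connector):
        self._calls += 1
        return {'driver_volume_type': 'iscsi',
                'data': {'volume_id': volume_id,
                         'target_iqn': 'iqn.2010-10.org.openstack:%s.%d'
                                       % (volume_id, self._calls)}}


def refresh_connection_info(volume_api, connector, bdms, bdm_store):
    """Re-initialize each connection and persist the new info (the
    intent of _refresh_block_device_connection_info)."""
    for bdm in bdms:
        cinfo = volume_api.initialize_connection(bdm['volume_id'], connector)
        bdm_store[bdm['id']] = json.dumps(cinfo)
    return bdms


if __name__ == '__main__':
    api = FakeVolumeAPI()
    store = {}
    bdms = [{'id': 1, 'volume_id': 'fake'}]
    refresh_connection_info(api, {}, bdms, store)
    print(store[1])   # cached info carries ...:fake.1
    refresh_connection_info(api, {}, bdms, store)
    print(store[1])   # cache refreshed to ...:fake.2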
@@ -1902,15 +1924,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.setup_networks_on_host(context, instance,
migration['source_compute'])
bdms = self._get_instance_volume_bdms(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
bdms = self._refresh_block_device_connection_info(
context, instance)
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume,
connector)
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
self.driver.finish_revert_migration(instance,
self._legacy_nw_info(network_info),
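To make the motivation concrete, a small hedged sketch of the failure mode the old code path allowed here: the result of initialize_connection was thrown away, so anything that later read the cached connection_info from the BDM row still saw the old target. The row and IQN values below are illustrative only.

import json

# Cached BDM row as written at the original attach time (old IQN).
bdm_row = {'connection_info': json.dumps(
    {'driver_volume_type': 'iscsi',
     'data': {'target_iqn': 'iqn.2010-10.org.openstack:fake.1'}})}

# Fresh details returned by the volume service during the revert/resize.
fresh = {'driver_volume_type': 'iscsi',
         'data': {'target_iqn': 'iqn.2010-10.org.openstack:fake.2'}}

# Old pattern: fresh result discarded, cache untouched.
stale_iqn = json.loads(bdm_row['connection_info'])['data']['target_iqn']

# New pattern: persist the fresh info before building block_device_info.
bdm_row['connection_info'] = json.dumps(fresh)
current_iqn = json.loads(bdm_row['connection_info'])['data']['target_iqn']

print(stale_iqn)     # ...:fake.1 -- what the driver used to see
print(current_iqn)   # ...:fake.2 -- what it sees after the refresh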
@@ -2172,17 +2190,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "finish_resize.start",
network_info=network_info)
bdms = self._get_instance_volume_bdms(context, instance)
bdms = self._refresh_block_device_connection_info(context, instance)
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
if bdms:
connector = self.driver.get_volume_connector(instance)
for bdm in bdms:
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume,
connector)
self.driver.finish_migration(context, migration, instance,
disk_info,
self._legacy_nw_info(network_info),
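For context on what finish_migration receives here: block_device_info is a dict whose 'block_device_mapping' list carries the per-volume connection info, now freshly persisted by the helper above. The sketch below assembles a simplified version from BDM rows; build_block_device_info is an illustrative stand-in for _get_instance_volume_block_device_info, which loads the rows itself and whose exact keys may differ.

import json


def build_block_device_info(bdms):
    # Simplified, illustrative assembly of the driver-facing structure.
    mapping = []
    for bdm in bdms:
        mapping.append({
            'connection_info': json.loads(bdm['connection_info']),
            'mount_device': bdm['device_name'],
            'delete_on_termination': bdm.get('delete_on_termination', False),
        })
    return {'block_device_mapping': mapping}


if __name__ == '__main__':
    bdms = [{'device_name': '/dev/vdc',
             'connection_info': json.dumps(
                 {'driver_volume_type': 'iscsi',
                  'data': {'target_iqn':
                           'iqn.2010-10.org.openstack:fake.2'}})}]
    info = build_block_device_info(bdms)
    print(info['block_device_mapping'][0]['connection_info']['data'])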
@@ -2752,18 +2764,10 @@ class ComputeManager(manager.SchedulerDependentManager):
required for live migration without shared storage.
"""
# If any volume is mounted, prepare here.
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
if not block_device_info['block_device_mapping']:
LOG.info(_('Instance has no volume.'), instance=instance)
bdms = self._refresh_block_device_connection_info(context, instance)
# assign the volume to host system
# needed by the lefthand volume driver and maybe others
connector = self.driver.get_volume_connector(instance)
for bdm in self._get_instance_volume_bdms(context, instance):
volume = self.volume_api.get(context, bdm['volume_id'])
self.volume_api.initialize_connection(context, volume, connector)
block_device_info = self._get_instance_volume_block_device_info(
context, instance, bdms=bdms)
network_info = self._get_instance_nw_info(context, instance)
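The connector mentioned in the comment above is a small dict describing the initiating host, which the volume back end (the LeftHand driver, for instance) uses to export the volume to the right system. A hedged sketch of its shape follows; the exact field names come from the virt driver and vary by transport, so treat them as an assumption rather than the precise Nova structure.

def example_volume_connector(host_ip, initiator_iqn, hostname):
    # Illustrative only: real connectors are built by
    # driver.get_volume_connector() and their keys are back-end specific.
    return {'ip': host_ip,
            'initiator': initiator_iqn,
            'host': hostname}


if __name__ == '__main__':
    print(example_volume_connector('10.0.0.5',
                                   'iqn.1993-08.org.debian:01:abcdef',
                                   'compute-node-1'))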


@@ -1919,6 +1919,148 @@ class ComputeTestCase(BaseTestCase):
reservations=reservations)
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_with_volumes(self):
"""Contrived test to ensure finish_resize doesn't raise anything."""
# create instance
instance = jsonutils.to_primitive(self._create_fake_instance())
# create volume
volume_id = 'fake'
volume = {'instance_uuid': None,
'device_name': None,
'volume_id': volume_id}
# stub out volume attach
def fake_volume_get(self, context, volume):
return volume
self.stubs.Set(cinder.API, "get", fake_volume_get)
orig_connection_data = {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:%s.1' % volume_id,
'target_portal': '127.0.0.1:3260',
'volume_id': volume_id,
}
connection_info = {
'driver_volume_type': 'iscsi',
'data': orig_connection_data,
}
def fake_init_conn(self, context, volume, session):
return connection_info
self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
def fake_attach(self, context, volume_id, instance_uuid, device_name):
volume['instance_uuid'] = instance_uuid
volume['device_name'] = device_name
self.stubs.Set(cinder.API, "attach", fake_attach)
# stub out virt driver attach
def fake_get_volume_connector(*args, **kwargs):
return {}
self.stubs.Set(self.compute.driver, 'get_volume_connector',
fake_get_volume_connector)
def fake_attach_volume(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'attach_volume',
fake_attach_volume)
# attach volume to instance
self.compute.attach_volume(self.context, volume['volume_id'],
'/dev/vdc', instance)
# assert volume attached correctly
self.assertEquals(volume['device_name'], '/dev/vdc')
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])
self.assertEquals(len(disk_info), 1)
for bdm in disk_info:
self.assertEquals(bdm['device_name'], volume['device_name'])
self.assertEquals(bdm['connection_info'],
jsonutils.dumps(connection_info))
# begin resize
instance_type = instance_types.get_default_instance_type()
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_PREP})
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
image={})
# NOTE(sirp): `prep_resize` mutates the `system_metadata` attribute in
# the DB but not on the instance passed in, so to sync the two, we need
# to refetch the row from the DB
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
migration_ref = db.migration_get_by_instance_and_status(
self.context.elevated(), instance['uuid'], 'pre-migrating')
# fake out detach for prep_resize (and later terminate)
def fake_terminate_connection(self, context, volume, connector):
connection_info['data'] = None
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)
self.compute.resize_instance(self.context, instance=instance,
migration=migration_ref, image={},
instance_type=jsonutils.to_primitive(instance_type))
# assert bdm is unchanged
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])
self.assertEquals(len(disk_info), 1)
for bdm in disk_info:
self.assertEquals(bdm['device_name'], volume['device_name'])
cached_connection_info = jsonutils.loads(bdm['connection_info'])
self.assertEquals(cached_connection_info['data'],
orig_connection_data)
# but connection was terminated
self.assertEquals(connection_info['data'], None)
# stub out virt driver finish_migration
def fake(*args, **kwargs):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
db.instance_update(self.context, instance["uuid"],
{"task_state": task_states.RESIZE_MIGRATED})
reservations = self._ensure_quota_reservations_committed()
# new initialize connection
new_connection_data = dict(orig_connection_data)
new_iqn = 'iqn.2010-10.org.openstack:%s.2' % volume_id
new_connection_data['target_iqn'] = new_iqn
def fake_init_conn(self, context, volume, session):
connection_info['data'] = new_connection_data
return connection_info
self.stubs.Set(cinder.API, "initialize_connection", fake_init_conn)
self.compute.finish_resize(self.context,
migration=jsonutils.to_primitive(migration_ref),
disk_info={}, image={}, instance=instance,
reservations=reservations)
# assert volume attached correctly
disk_info = db.block_device_mapping_get_all_by_instance(
self.context, instance['uuid'])
self.assertEquals(len(disk_info), 1)
for bdm in disk_info:
self.assertEquals(bdm['connection_info'],
jsonutils.dumps(connection_info))
# stub out detach
def fake_detach(self, context, volume_uuid):
volume['device_path'] = None
volume['instance_uuid'] = None
self.stubs.Set(cinder.API, "detach", fake_detach)
# clean up
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
# Make sure we don't leave the instance in RESIZE on error.