Merge "Refactors handling of detach volume"
@@ -33,7 +33,6 @@ terminating it.
 
 """
 
-import datetime
 import functools
 import os
 import socket
@@ -146,6 +145,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             sys.exit(1)
 
         self.network_api = network.API()
+        self.volume_api = volume.API()
         self.network_manager = utils.import_object(FLAGS.network_manager)
         self._last_host_check = 0
         self._last_bw_usage_poll = 0
@@ -245,7 +245,6 @@ class ComputeManager(manager.SchedulerDependentManager):
 
     def _setup_block_device_mapping(self, context, instance):
         """setup volumes for block device mapping"""
-        volume_api = volume.API()
        block_device_mapping = []
         swap = None
         ephemerals = []
@@ -273,18 +272,18 @@ class ComputeManager(manager.SchedulerDependentManager):
             if ((bdm['snapshot_id'] is not None) and
                 (bdm['volume_id'] is None)):
                 # TODO(yamahata): default name and description
-                vol = volume_api.create(context, bdm['volume_size'],
+                vol = self.volume_api.create(context, bdm['volume_size'],
                                         bdm['snapshot_id'], '', '')
                 # TODO(yamahata): creating volume simultaneously
                 # reduces creation time?
-                volume_api.wait_creation(context, vol['id'])
+                self.volume_api.wait_creation(context, vol['id'])
                 self.db.block_device_mapping_update(
                     context, bdm['id'], {'volume_id': vol['id']})
                 bdm['volume_id'] = vol['id']
 
             if bdm['volume_id'] is not None:
-                volume_api.check_attach(context,
+                self.volume_api.check_attach(context,
                                         volume_id=bdm['volume_id'])
                 cinfo = self._attach_volume_boot(context, instance,
                                                  bdm['volume_id'],
                                                  bdm['device_name'])
@@ -507,6 +506,15 @@ class ComputeManager(manager.SchedulerDependentManager):
             instance_id)
         return [bdm for bdm in bdms if bdm['volume_id']]
 
+    def _get_instance_volume_bdm(self, context, instance_id, volume_id):
+        bdms = self._get_instance_volume_bdms(context, instance_id)
+        for bdm in bdms:
+            # NOTE(vish): Comparing as strings because the os_api doesn't
+            # convert to integer and we may wish to support uuids
+            # in the future.
+            if str(bdm['volume_id']) == str(volume_id):
+                return bdm
+
     def _get_instance_volume_block_device_info(self, context, instance_id):
         bdms = self._get_instance_volume_bdms(context, instance_id)
         block_device_mapping = []
@@ -515,8 +523,8 @@ class ComputeManager(manager.SchedulerDependentManager):
             block_device_mapping.append({'connection_info': cinfo,
                                          'mount_device':
                                          bdm['device_name']})
-        ## NOTE(vish): The mapping is passed in so the driver can disconnect
-        ## from remote volumes if necessary
+        # NOTE(vish): The mapping is passed in so the driver can disconnect
+        # from remote volumes if necessary
         return {'block_device_mapping': block_device_mapping}
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -546,29 +554,33 @@ class ComputeManager(manager.SchedulerDependentManager):
         if not FLAGS.stub_network:
             self.network_api.deallocate_for_instance(context, instance)
 
-        for bdm in self._get_instance_volume_bdms(context, instance_id):
-            volume_id = bdm['volume_id']
-            try:
-                self._detach_volume(context, instance_uuid, volume_id)
-            except exception.DiskNotFound as exc:
-                LOG.warn(_("Ignoring DiskNotFound: %s") % exc)
-
         if instance['power_state'] == power_state.SHUTOFF:
             self.db.instance_destroy(context, instance_id)
             raise exception.Error(_('trying to destroy already destroyed'
                                     ' instance: %s') % instance_uuid)
+        # NOTE(vish) get bdms before destroying the instance
+        bdms = self._get_instance_volume_bdms(context, instance_id)
         block_device_info = self._get_instance_volume_block_device_info(
             context, instance_id)
         self.driver.destroy(instance, network_info, block_device_info, cleanup)
+        for bdm in bdms:
+            try:
+                # NOTE(vish): actual driver detach done in driver.destroy, so
+                # just tell nova-volume that we are done with it.
+                self.volume_api.terminate_connection(context,
+                                                     bdm['volume_id'],
+                                                     FLAGS.my_ip)
+                self.volume_api.detach(context, bdm['volume_id'])
+            except exception.DiskNotFound as exc:
+                LOG.warn(_("Ignoring DiskNotFound: %s") % exc)
 
     def _cleanup_volumes(self, context, instance_id):
-        volume_api = volume.API()
         bdms = self.db.block_device_mapping_get_all_by_instance(context,
                                                                 instance_id)
         for bdm in bdms:
             LOG.debug(_("terminating bdm %s") % bdm)
             if bdm['volume_id'] and bdm['delete_on_termination']:
-                volume_api.delete(context, bdm['volume_id'])
+                self.volume_api.delete(context, bdm['volume_id'])
         # NOTE(vish): bdms will be deleted on instance destroy
 
     def _delete_instance(self, context, instance):
@@ -1400,13 +1412,13 @@ class ComputeManager(manager.SchedulerDependentManager):
                     "volume %(volume_id)s at %(mountpoint)s") %
                   locals(), context=context)
         address = FLAGS.my_ip
-        volume_api = volume.API()
-        connection_info = volume_api.initialize_connection(context,
-                                                           volume_id,
-                                                           address)
-        volume_api.attach(context, volume_id, instance_id, mountpoint)
+        connection_info = self.volume_api.initialize_connection(context,
+                                                                volume_id,
+                                                                address)
+        self.volume_api.attach(context, volume_id, instance_id, mountpoint)
         return connection_info
 
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @checks_instance_lock
     def attach_volume(self, context, instance_uuid, volume_id, mountpoint):
         """Attach a volume to an instance."""
@@ -1416,11 +1428,10 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.audit(
             _("instance %(instance_uuid)s: attaching volume %(volume_id)s"
               " to %(mountpoint)s") % locals(), context=context)
-        volume_api = volume.API()
         address = FLAGS.my_ip
-        connection_info = volume_api.initialize_connection(context,
+        connection_info = self.volume_api.initialize_connection(context,
                                                            volume_id,
                                                            address)
         try:
             self.driver.attach_volume(connection_info,
                                       instance_ref['name'],
@@ -1432,10 +1443,10 @@ class ComputeManager(manager.SchedulerDependentManager):
             # ecxception below.
             LOG.exception(_("instance %(instance_uuid)s: attach failed"
                 " %(mountpoint)s, removing") % locals(), context=context)
-            volume_api.terminate_connection(context, volume_id, address)
+            self.volume_api.terminate_connection(context, volume_id, address)
             raise exc
 
-        volume_api.attach(context, volume_id, instance_id, mountpoint)
+        self.volume_api.attach(context, volume_id, instance_id, mountpoint)
         values = {
             'instance_id': instance_id,
             'connection_info': utils.dumps(connection_info),
@@ -1449,60 +1460,51 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.db.block_device_mapping_create(context, values)
         return True
 
-    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
-    @checks_instance_lock
-    def _detach_volume(self, context, instance_uuid, volume_id,
-                       destroy_bdm=False, mark_detached=True,
-                       force_detach=False):
-        """Detach a volume from an instance."""
-        context = context.elevated()
-        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
-        instance_id = instance_ref['id']
-        bdms = self.db.block_device_mapping_get_all_by_instance(
-            context, instance_id)
-        for item in bdms:
-            # NOTE(vish): Comparing as strings because the os_api doesn't
-            # convert to integer and we may wish to support uuids
-            # in the future.
-            if str(item['volume_id']) == str(volume_id):
-                bdm = item
-                break
+    def _detach_volume(self, context, instance_name, bdm):
+        """Do the actual driver detach using block device mapping."""
         mp = bdm['device_name']
+        volume_id = bdm['volume_id']
 
         LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
-                " on instance %(instance_id)s") % locals(), context=context)
-        volume_api = volume.API()
-        if (instance_ref['name'] not in self.driver.list_instances() and
-                not force_detach):
-            LOG.warn(_("Detaching volume from unknown instance %s"),
-                     instance_id, context=context)
-        else:
-            self.driver.detach_volume(utils.loads(bdm['connection_info']),
-                                      instance_ref['name'],
-                                      bdm['device_name'])
-        address = FLAGS.my_ip
-        volume_api.terminate_connection(context, volume_id, address)
-        if mark_detached:
-            volume_api.detach(context, volume_id)
-        if destroy_bdm:
-            self.db.block_device_mapping_destroy_by_instance_and_volume(
-                context, instance_id, volume_id)
-        return True
+                " on instance %(instance_name)s") % locals(), context=context)
 
+        if instance_name not in self.driver.list_instances():
+            LOG.warn(_("Detaching volume from unknown instance %s"),
+                     instance_name, context=context)
+        self.driver.detach_volume(utils.loads(bdm['connection_info']),
+                                  instance_name,
+                                  mp)
+
+    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+    @checks_instance_lock
     def detach_volume(self, context, instance_uuid, volume_id):
         """Detach a volume from an instance."""
-        return self._detach_volume(context, instance_uuid, volume_id, True)
+        instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+        instance_id = instance_ref['id']
+        bdm = self._get_instance_volume_bdm(context, instance_id, volume_id)
+        self._detach_volume(context, instance_ref['name'], bdm)
+        self.volume_api.terminate_connection(context, volume_id, FLAGS.my_ip)
+        self.volume_api.detach(context, volume_id)
+        self.db.block_device_mapping_destroy_by_instance_and_volume(
+            context, instance_id, volume_id)
+        return True
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def remove_volume_connection(self, context, instance_id, volume_id):
-        """Detach a volume from an instance.,"""
+        """Remove a volume connection using the volume api"""
         # NOTE(vish): We don't want to actually mark the volume
         # detached, or delete the bdm, just remove the
         # connection from this host.
         try:
             instance_ref = self.db.instance_get(context, instance_id)
-            self._detach_volume(context, instance_ref['uuid'], volume_id,
-                                False, False, True)
+            bdm = self._get_instance_volume_bdm(context,
+                                                instance_id,
+                                                volume_id)
+            self._detach_volume(context, instance_ref['name'],
+                                bdm['volume_id'], bdm['device_name'])
+            self.volume_api.terminate_connection(context,
+                                                 volume_id,
+                                                 FLAGS.my_ip)
         except exception.NotFound:
             pass
 
@@ -1821,8 +1823,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         for bdm in self._get_instance_volume_bdms(context, instance_ref['id']):
             volume_id = bdm['volume_id']
             self.db.volume_update(context, volume_id, {'status': 'in-use'})
-            volume.API().remove_from_compute(context, instance_ref['id'],
+            self.volume_api.remove_from_compute(context, instance_ref['id'],
                                              volume_id, dest)
 
         # Block migration needs empty image at destination host
         # before migration starts, so if any failure occurs,