Merge "Remove TaskRunner from Volume resources"
This commit is contained in:
commit 6cf94a3ece
@@ -22,6 +22,7 @@ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine.clients import client_plugin
from heat.engine import constraints
from heat.engine import resource


LOG = logging.getLogger(__name__)
@@ -127,6 +128,87 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
        return (isinstance(ex, exceptions.ClientException) and
                ex.code == 409)

    def check_detach_volume_complete(self, vol_id):
        try:
            vol = self.client().volumes.get(vol_id)
        except Exception as ex:
            self.ignore_not_found(ex)
            return True

        if vol.status in ('in-use', 'detaching'):
            LOG.debug('%s - volume still in use' % vol_id)
            return False

        LOG.debug('Volume %(id)s - status: %(status)s' % {
            'id': vol.id, 'status': vol.status})

        if vol.status not in ('available', 'deleting'):
            LOG.debug("Detachment failed - volume %(vol)s "
                      "is in %(status)s status" % {"vol": vol.id,
                                                   "status": vol.status})
            raise resource.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume detachment failed'))
        else:
            return True

    def check_attach_volume_complete(self, vol_id):
        vol = self.client().volumes.get(vol_id)
        if vol.status in ('available', 'attaching'):
            LOG.debug("Volume %(id)s is being attached - "
                      "volume status: %(status)s" % {'id': vol_id,
                                                     'status': vol.status})
            return False

        if vol.status != 'in-use':
            LOG.debug("Attachment failed - volume %(vol)s is "
                      "in %(status)s status" % {"vol": vol_id,
                                                "status": vol.status})
            raise resource.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume attachment failed'))

        LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id})
        return True


# NOTE(pshchelo): these Volume*Progress classes are simple key-value storages
# meant to be passed between handle_<action> and check_<action>_complete,
# being mutated during subsequent check_<action>_complete calls.
class VolumeDetachProgress(object):
    def __init__(self, srv_id, vol_id, attach_id, val=False):
        self.called = val
        self.cinder_complete = val
        self.nova_complete = val
        self.srv_id = srv_id
        self.vol_id = vol_id
        self.attach_id = attach_id


class VolumeAttachProgress(object):
    def __init__(self, srv_id, vol_id, device, val=False):
        self.called = val
        self.complete = val
        self.srv_id = srv_id
        self.vol_id = vol_id
        self.device = device


class VolumeDeleteProgress(object):
    def __init__(self, val=False):
        self.backup = {'called': val,
                       'complete': val}
        self.delete = {'called': val,
                       'complete': val}
        self.backup_id = None


class VolumeResizeProgress(object):
    def __init__(self, val=False, size=None):
        self.called = val
        self.complete = val
        self.size = size


class VolumeConstraint(constraints.BaseCustomConstraint):

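For context (not part of this change): the Volume*Progress objects above work because Heat's engine repeatedly polls the matching check_<action>_complete() method, passing back whatever handle_<action>() returned, until it reports True. A simplified sketch of that contract, with a hypothetical resource and a busy-wait standing in for the engine's cooperative yielding:

    # Simplified sketch of the handle/check polling contract; this is not the
    # actual heat.engine scheduler code.
    def drive_create(resource):
        token = resource.handle_create()          # e.g. a VolumeAttachProgress
        while not resource.check_create_complete(token):
            pass                                  # the real engine yields/sleeps here
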
@@ -32,6 +32,7 @@ from six.moves.urllib import parse as urlparse

from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin
from heat.engine import constraints
@@ -495,6 +496,55 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers

        return net_id

    def attach_volume(self, server_id, volume_id, device):
        try:
            va = self.client().volumes.create_server_volume(
                server_id=server_id,
                volume_id=volume_id,
                device=device)
        except Exception as ex:
            if self.is_client_exception(ex):
                raise exception.Error(_(
                    "Failed to attach volume %(vol)s to server %(srv)s "
                    "- %(err)s") % {'vol': volume_id,
                                    'srv': server_id,
                                    'err': ex})
            else:
                raise
        return va.id

    def detach_volume(self, server_id, attach_id):
        # detach the volume using volume_attachment
        try:
            self.client().volumes.delete_server_volume(server_id, attach_id)
        except Exception as ex:
            if not (self.is_not_found(ex)
                    or self.is_bad_request(ex)):
                raise exception.Error(
                    _("Could not detach attachment %(att)s "
                      "from server %(srv)s.") % {'srv': server_id,
                                                 'att': attach_id})

    def check_detach_volume_complete(self, server_id, attach_id):
        """Check that nova server lost attachment.

        This check is needed for immediate reattachment when updating:
        there might be some time between cinder marking volume as 'available'
        and nova removing attachment from its own objects, so we
        check that nova already knows that the volume is detached.
        """
        try:
            self.client().volumes.get_server_volume(server_id, attach_id)
        except Exception as ex:
            self.ignore_not_found(ex)
            LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
                     {'vol': attach_id, 'srv': server_id})
            return True
        else:
            LOG.debug("Server %(srv)s still has attachment %(att)s." % {
                'att': attach_id, 'srv': server_id})
            return False


class ServerConstraint(constraints.BaseCustomConstraint):

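For context (not part of this change): together with the Cinder plugin methods above, a detach is expected to be driven in this order. The sketch below is illustrative only; nova_plugin, cinder_plugin, server_id, volume_id and attachment_id are placeholder names, and the busy-waits stand in for the engine's polling between check calls.

    # Hypothetical call order for a detach, not actual Heat engine code.
    nova_plugin.detach_volume(server_id, attachment_id)
    # 1. wait until Cinder no longer reports the volume as in use
    while not cinder_plugin.check_detach_volume_complete(volume_id):
        pass
    # 2. wait until Nova has also dropped the attachment, so an immediate
    #    re-attach during a stack update does not race
    while not nova_plugin.check_detach_volume_complete(server_id, attachment_id):
        pass
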
@@ -19,12 +19,12 @@ from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine.clients.os import cinder as heat_cinder
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import volume_base as vb
from heat.engine import scheduler
from heat.engine import support
from heat.engine import volume_tasks as vol_task

LOG = logging.getLogger(__name__)

@@ -236,10 +236,41 @@ class CinderVolume(vb.BaseVolume):

        return vol_id

    def _extend_volume(self, new_size):
        try:
            self.client().volumes.extend(self.resource_id, new_size)
        except Exception as ex:
            if self.client_plugin().is_client_exception(ex):
                raise exception.Error(_(
                    "Failed to extend volume %(vol)s - %(err)s") % {
                        'vol': self.resource_id, 'err': str(ex)})
            else:
                raise
        return True

    def _check_extend_volume_complete(self):
        vol = self.client().volumes.get(self.resource_id)
        if vol.status == 'extending':
            LOG.debug("Volume %s is being extended" % vol.id)
            return False

        if vol.status != 'available':
            LOG.info(_LI("Resize failed: Volume %(vol)s "
                         "is in %(status)s state."),
                     {'vol': vol.id, 'status': vol.status})
            raise resource.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume resize failed'))

        LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
        return True

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        vol = None
        checkers = []
        cinder = self.client()
        prg_resize = None
        prg_attach = None
        prg_detach = None
        # update the name and description for cinder volume
        if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
            vol = cinder.volumes.get(self.resource_id)
@@ -268,6 +299,10 @@ class CinderVolume(vb.BaseVolume):
            vol = cinder.volumes.get(self.resource_id)
            new_vol_type = prop_diff.get(self.VOLUME_TYPE)
            cinder.volumes.retype(vol, new_vol_type, 'never')
        # update read_only access mode
        if self.READ_ONLY in prop_diff:
            flag = prop_diff.get(self.READ_ONLY)
            cinder.volumes.update_readonly_flag(self.resource_id, flag)
        # extend volume size
        if self.SIZE in prop_diff:
            if not vol:
@@ -278,6 +313,7 @@ class CinderVolume(vb.BaseVolume):
                raise exception.NotSupported(feature=_("Shrinking volume"))

            elif new_size > vol.size:
                prg_resize = heat_cinder.VolumeResizeProgress(size=new_size)
                if vol.attachments:
                    # NOTE(pshchelo):
                    # this relies on current behavior of cinder attachments,
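For context (not part of this change): the NOTE above, truncated by the hunk boundary, refers to the shape of Cinder's attachment records; for a non-shareable volume the list holds at most one entry, roughly as sketched here with made-up values:

    # Illustrative shape of vol.attachments; the keys read by the code are
    # 'server_id', 'device' and 'id'.
    attachments = [{
        'server_id': 'aaaaaaaa-1111-2222-3333-444444444444',  # Nova server UUID
        'device': '/dev/vdb',                                 # device name in the guest
        'id': 'bbbbbbbb-5555-6666-7777-888888888888',         # attachment id used to detach
    }]
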
@@ -289,35 +325,59 @@ class CinderVolume(vb.BaseVolume):
                    server_id = vol.attachments[0]['server_id']
                    device = vol.attachments[0]['device']
                    attachment_id = vol.attachments[0]['id']
                    detach_task = vol_task.VolumeDetachTask(
                        self.stack, server_id, attachment_id)
                    checkers.append(scheduler.TaskRunner(detach_task))
                    extend_task = vol_task.VolumeExtendTask(
                        self.stack, vol.id, new_size)
                    checkers.append(scheduler.TaskRunner(extend_task))
                    attach_task = vol_task.VolumeAttachTask(
                        self.stack, server_id, vol.id, device)
                    checkers.append(scheduler.TaskRunner(attach_task))
                    prg_detach = heat_cinder.VolumeDetachProgress(
                        server_id, vol.id, attachment_id)
                    prg_attach = heat_cinder.VolumeAttachProgress(
                        server_id, vol.id, device)

                else:
                    extend_task = vol_task.VolumeExtendTask(
                        self.stack, vol.id, new_size)
                    checkers.append(scheduler.TaskRunner(extend_task))
        # update read_only access mode
        if self.READ_ONLY in prop_diff:
            flag = prop_diff.get(self.READ_ONLY)
            cinder.volumes.update_readonly_flag(self.resource_id, flag)
        return prg_detach, prg_resize, prg_attach

        if checkers:
            checkers[0].start()
        return checkers
    def _detach_volume_to_complete(self, prg_detach):
        if not prg_detach.called:
            self.client_plugin('nova').detach_volume(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            prg_detach.called = True
            return False
        if not prg_detach.cinder_complete:
            cinder_complete_res = self.client_plugin(
            ).check_detach_volume_complete(prg_detach.vol_id)
            prg_detach.cinder_complete = cinder_complete_res
            return False
        if not prg_detach.nova_complete:
            prg_detach.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                     prg_detach.attach_id)
            return False

    def _attach_volume_to_complete(self, prg_attach):
        if not prg_attach.called:
            prg_attach.called = self.client_plugin('nova').attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
            return False
        if not prg_attach.complete:
            prg_attach.complete = self.client_plugin(
            ).check_attach_volume_complete(prg_attach.vol_id)
            return prg_attach.complete

    def check_update_complete(self, checkers):
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
        prg_detach, prg_resize, prg_attach = checkers
        if not prg_resize:
            return True
        # detach volume
        if prg_detach:
            if not prg_detach.nova_complete:
                self._detach_volume_to_complete(prg_detach)
                return False
        # resize volume
        if not prg_resize.called:
            prg_resize.called = self._extend_volume(prg_resize.size)
            return False
        if not prg_resize.complete:
            prg_resize.complete = self._check_extend_volume_complete()
            return prg_resize.complete and not prg_attach
        # reattach volume back
        if prg_attach:
            return self._attach_volume_to_complete(prg_attach)
        return True

    def handle_snapshot(self):
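For context (not part of this change): handle_update() now returns the three progress objects instead of TaskRunner checkers, and check_update_complete() walks them as a small state machine (detach if attached, then extend, then re-attach). A hypothetical driver loop, not engine code, showing how the tuple is threaded through repeated calls:

    # Illustration only: the same tuple returned by handle_update() is passed
    # back to check_update_complete() on every poll until it returns True.
    progress = volume.handle_update(json_snippet, tmpl_diff, prop_diff)
    while not volume.check_update_complete(progress):
        pass  # the real engine yields between polls
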
@@ -334,24 +394,25 @@ class CinderVolume(vb.BaseVolume):
            raise exception.Error(backup.fail_reason)

    def handle_delete_snapshot(self, snapshot):
        backup_id = snapshot['resource_data'].get('backup_id')
        backup_id = snapshot['resource_data']['backup_id']
        try:
            self.client().backups.delete(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return
        else:
            return backup_id

        def delete():
            cinder = self.client()
            try:
                cinder.backups.delete(backup_id)
                while True:
                    yield
                    cinder.backups.get(backup_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

        delete_task = scheduler.TaskRunner(delete)
        delete_task.start()
        return delete_task

    def check_delete_snapshot_complete(self, delete_task):
        return delete_task.step()
    def check_delete_snapshot_complete(self, backup_id):
        if not backup_id:
            return True
        try:
            self.client().backups.get(backup_id)
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        else:
            return False

    def _build_exclusive_options(self):
        exclusive_options = []
@@ -463,13 +524,21 @@ class CinderVolumeAttachment(vb.BaseVolumeAttachment):
        }

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        checkers = []
        prg_attach = None
        prg_detach = None
        if prop_diff:
            # Even though some combinations of changed properties
            # could be updated in UpdateReplace manner,
            # we still first detach the old resource so that
            # self.resource_id is not replaced prematurely
            volume_id = self.properties[self.VOLUME_ID]
            server_id = self._stored_properties_data.get(self.INSTANCE_ID)
            self.client_plugin('nova').detach_volume(server_id,
                                                     self.resource_id)
            prg_detach = heat_cinder.VolumeDetachProgress(
                server_id, volume_id, self.resource_id)
            prg_detach.called = True

            if self.VOLUME_ID in prop_diff:
                volume_id = prop_diff.get(self.VOLUME_ID)

@@ -477,29 +546,36 @@ class CinderVolumeAttachment(vb.BaseVolumeAttachment):
            if self.DEVICE in prop_diff:
                device = prop_diff.get(self.DEVICE)

            server_id = self._stored_properties_data.get(self.INSTANCE_ID)
            detach_task = vol_task.VolumeDetachTask(
                self.stack, server_id, self.resource_id)
            checkers.append(scheduler.TaskRunner(detach_task))

            if self.INSTANCE_ID in prop_diff:
                server_id = prop_diff.get(self.INSTANCE_ID)
            attach_task = vol_task.VolumeAttachTask(
                self.stack, server_id, volume_id, device)
            prg_attach = heat_cinder.VolumeAttachProgress(
                server_id, volume_id, device)

            checkers.append(scheduler.TaskRunner(attach_task))

        if checkers:
            checkers[0].start()
        return checkers
        return prg_detach, prg_attach

    def check_update_complete(self, checkers):
        for checker in checkers:
            if not checker.started():
                checker.start()
            if not checker.step():
                return False
        self.resource_id_set(checkers[-1]._task.attachment_id)
        prg_detach, prg_attach = checkers
        if not (prg_detach and prg_attach):
            return True
        if not prg_detach.cinder_complete:
            prg_detach.cinder_complete = self.client_plugin(
            ).check_detach_volume_complete(prg_detach.vol_id)
            return False
        if not prg_detach.nova_complete:
            prg_detach.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg_detach.srv_id,
                                                     self.resource_id)
            return False
        if not prg_attach.called:
            prg_attach.called = self.client_plugin('nova').attach_volume(
                prg_attach.srv_id, prg_attach.vol_id, prg_attach.device)
            return False
        if not prg_attach.complete:
            prg_attach.complete = self.client_plugin(
            ).check_attach_volume_complete(prg_attach.vol_id)
            if prg_attach.complete:
                self.resource_id_set(prg_attach.called)
            return prg_attach.complete
        return True


@@ -13,9 +13,8 @@

from heat.common import exception
from heat.common.i18n import _
from heat.engine.clients.os import cinder as heat_cinder
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import volume_tasks as vol_task


class BaseVolume(resource.Resource):
@@ -82,55 +81,80 @@ class BaseVolume(resource.Resource):
        ]
        self._verify_check_conditions(checks)

    def _backup(self):
        cinder = self.client()
        backup = cinder.backups.create(self.resource_id)
        while backup.status == 'creating':
            yield
            backup = cinder.backups.get(backup.id)
        if backup.status != 'available':
    def handle_snapshot_delete(self, state):
        backup = state not in ((self.CREATE, self.FAILED),
                               (self.UPDATE, self.FAILED))
        progress = heat_cinder.VolumeDeleteProgress()
        progress.backup['called'] = not backup
        progress.backup['complete'] = not backup
        return progress

    def handle_delete(self):
        if self.resource_id is None:
            return heat_cinder.VolumeDeleteProgress(True)
        progress = heat_cinder.VolumeDeleteProgress()
        progress.backup['called'] = True
        progress.backup['complete'] = True
        return progress

    def _create_backup(self):
        backup = self.client().backups.create(self.resource_id)
        return backup.id

    def _check_create_backup_complete(self, prg):
        backup = self.client().backups.get(prg.backup_id)
        if backup.status == 'creating':
            return False
        if backup.status == 'available':
            return True
        else:
            raise resource.ResourceUnknownStatus(
                resource_status=backup.status,
                result=_('Volume backup failed'))

    @scheduler.wrappertask
    def _delete(self, backup=False):
        if self.resource_id is not None:
    def _delete_volume(self):
        try:
            cinder = self.client()
            vol = cinder.volumes.get(self.resource_id)
            if vol.status == 'in-use':
                raise exception.Error(_('Volume in use'))
            # if the volume is already in deleting status,
            # just wait for the deletion to complete
            if vol.status != 'deleting':
                cinder.volumes.delete(self.resource_id)
            else:
                return True
        except Exception as ex:
            self.client_plugin().ignore_not_found(ex)
            return True
        else:
            return False

    def check_delete_complete(self, prg):
        if not prg.backup['called']:
            prg.backup_id = self._create_backup()
            prg.backup['called'] = True
            return False

        if not prg.backup['complete']:
            prg.backup['complete'] = self._check_create_backup_complete(prg)
            return False

        if not prg.delete['called']:
            prg.delete['complete'] = self._delete_volume()
            prg.delete['called'] = True
            return False

        if not prg.delete['complete']:
            try:
                vol = cinder.volumes.get(self.resource_id)

                if backup:
                    yield self._backup()
                vol = cinder.volumes.get(self.resource_id)

                if vol.status == 'in-use':
                    raise exception.Error(_('Volume in use'))
                # if the volume is already in deleting status,
                # just wait for the deletion to complete
                if vol.status != 'deleting':
                    cinder.volumes.delete(self.resource_id)
                while True:
                    yield
                    vol = cinder.volumes.get(self.resource_id)
                self.client().volumes.get(self.resource_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

    def handle_snapshot_delete(self, state):
        backup = state not in ((self.CREATE, self.FAILED),
                               (self.UPDATE, self.FAILED))

        delete_task = scheduler.TaskRunner(self._delete, backup=backup)
        delete_task.start()
        return delete_task

    def handle_delete(self):
        delete_task = scheduler.TaskRunner(self._delete)
        delete_task.start()
        return delete_task

    def check_delete_complete(self, delete_task):
        return delete_task.step()
                prg.delete['complete'] = True
                return True
            else:
                return False
        return True


class BaseVolumeAttachment(resource.Resource):
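For context (not part of this change): with VolumeDeleteProgress, deletion of a BaseVolume under DeletionPolicy: Snapshot proceeds through up to four polled phases. The outline below is an assumption-labelled sketch of that ordering, not code from the diff:

    # Phases recorded on the progress object, in the order
    # check_delete_complete() advances them:
    #   prg.backup['called']    - backup requested via _create_backup()
    #   prg.backup['complete']  - backup reached 'available'
    #   prg.delete['called']    - volume delete requested via _delete_volume()
    #   prg.delete['complete']  - Cinder no longer returns the volume
    prg = volume.handle_snapshot_delete(state=(volume.CREATE, volume.COMPLETE))
    while not volume.check_delete_complete(prg):
        pass  # the engine yields between polls
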
@@ -138,31 +162,41 @@ class BaseVolumeAttachment(resource.Resource):
    Base Volume Attachment Manager.
    '''

    default_client_name = 'cinder'

    def handle_create(self):
        server_id = self.properties[self.INSTANCE_ID]
        volume_id = self.properties[self.VOLUME_ID]
        dev = self.properties[self.DEVICE]

        attach_task = vol_task.VolumeAttachTask(
            self.stack, server_id, volume_id, dev)
        attach_runner = scheduler.TaskRunner(attach_task)
        attach_id = self.client_plugin('nova').attach_volume(
            server_id, volume_id, dev)

        attach_runner.start()
        self.resource_id_set(attach_id)

        self.resource_id_set(attach_task.attachment_id)
        return volume_id

        return attach_runner

    def check_create_complete(self, attach_runner):
        return attach_runner.step()
    def check_create_complete(self, volume_id):
        return self.client_plugin().check_attach_volume_complete(volume_id)

    def handle_delete(self):
        server_id = self.properties[self.INSTANCE_ID]
        detach_task = vol_task.VolumeDetachTask(
            self.stack, server_id, self.resource_id)
        detach_runner = scheduler.TaskRunner(detach_task)
        detach_runner.start()
        return detach_runner
        vol_id = self.properties[self.VOLUME_ID]
        self.client_plugin('nova').detach_volume(server_id,
                                                 self.resource_id)
        prg = heat_cinder.VolumeDetachProgress(
            server_id, vol_id, self.resource_id)
        prg.called = True
        return prg

    def check_delete_complete(self, detach_runner):
        return detach_runner.step()
    def check_delete_complete(self, prg):
        if not prg.cinder_complete:
            prg.cinder_complete = self.client_plugin(
            ).check_detach_volume_complete(prg.vol_id)
            return False
        if not prg.nova_complete:
            prg.nova_complete = self.client_plugin(
                'nova').check_detach_volume_complete(prg.srv_id,
                                                     prg.attach_id)
            return prg.nova_complete
        return True

@@ -13,7 +13,6 @@

from oslo_log import log as logging

from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import resource
@@ -21,56 +20,6 @@ from heat.engine import resource
LOG = logging.getLogger(__name__)


class VolumeExtendTask(object):
    """A task to resize volume using Cinder API."""

    def __init__(self, stack, volume_id, size):
        self.clients = stack.clients
        self.volume_id = volume_id
        self.size = size

    def __str__(self):
        return _("Resizing volume %(vol)s to size %(size)i") % {
            'vol': self.volume_id, 'size': self.size}

    def __repr__(self):
        return "%s(%s +-> %i)" % (type(self).__name__, self.volume_id,
                                  self.size)

    def __call__(self):
        LOG.debug(str(self))

        cinder = self.clients.client('cinder').volumes
        vol = cinder.get(self.volume_id)

        try:
            cinder.extend(self.volume_id, self.size)
        except Exception as ex:
            if self.clients.client_plugin('cinder').is_client_exception(ex):
                raise exception.Error(_(
                    "Failed to extend volume %(vol)s - %(err)s") % {
                        'vol': vol.id, 'err': ex})
            else:
                raise

        yield

        vol = cinder.get(self.volume_id)
        while vol.status == 'extending':
            LOG.debug("Volume %s is being extended" % self.volume_id)
            yield
            vol = cinder.get(self.volume_id)

        if vol.status != 'available':
            LOG.info(_LI("Resize failed: Volume %(vol)s is in %(status)s "
                         "state."), {'vol': vol.id, 'status': vol.status})
            raise resource.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume resize failed'))

        LOG.info(_LI('%s - complete'), str(self))


class VolumeAttachTask(object):
    """A task for attaching a volume to a Nova server."""

@@ -128,102 +77,3 @@
                result=_('Volume attachment failed'))

        LOG.info(_LI('%s - complete'), str(self))


class VolumeDetachTask(object):
    """A task for detaching a volume from a Nova server."""

    def __init__(self, stack, server_id, attachment_id):
        """
        Initialise with the stack (for obtaining the clients), and the IDs of
        the server and volume.
        """
        self.clients = stack.clients
        self.server_id = server_id
        self.attachment_id = attachment_id

    def __str__(self):
        """Return a human-readable string description of the task."""
        return _('Removing attachment %(att)s from Instance %(srv)s') % {
            'att': self.attachment_id, 'srv': self.server_id}

    def __repr__(self):
        """Return a brief string description of the task."""
        return '%s(%s -/> %s)' % (type(self).__name__,
                                  self.attachment_id,
                                  self.server_id)

    def __call__(self):
        """Return a co-routine which runs the task."""
        LOG.debug(str(self))

        nova_plugin = self.clients.client_plugin('nova')
        cinder_plugin = self.clients.client_plugin('cinder')
        server_api = self.clients.client('nova').volumes
        cinder = self.clients.client('cinder')
        # get reference to the volume while it is attached
        try:
            nova_vol = server_api.get_server_volume(self.server_id,
                                                    self.attachment_id)
            vol = cinder.volumes.get(nova_vol.id)
        except Exception as ex:
            if (cinder_plugin.is_not_found(ex) or
                    nova_plugin.is_not_found(ex) or
                    nova_plugin.is_bad_request(ex)):
                return
            else:
                raise

        if vol.status == 'deleting':
            return

        # detach the volume using volume_attachment
        try:
            server_api.delete_server_volume(self.server_id, self.attachment_id)
        except Exception as ex:
            if nova_plugin.is_not_found(ex) or nova_plugin.is_bad_request(ex):
                pass
            else:
                raise

        yield

        try:
            while vol.status in ('in-use', 'detaching'):
                LOG.debug('%s - volume still in use' % str(self))
                yield
                vol = cinder.volumes.get(nova_vol.id)

            LOG.info(_LI('%(name)s - status: %(status)s'),
                     {'name': str(self), 'status': vol.status})
            if vol.status not in ['available', 'deleting']:
                LOG.info(_LI("Detachment failed - volume %(vol)s "
                             "is in %(status)s status"),
                         {"vol": vol.id,
                          "status": vol.status})
                raise resource.ResourceUnknownStatus(
                    resource_status=vol.status,
                    result=_('Volume detachment failed'))

        except Exception as ex:
            cinder_plugin.ignore_not_found(ex)

        # The next check is needed for immediate reattachment when updating:
        # there might be some time between cinder marking volume as 'available'
        # and nova removing attachment from its own objects, so we
        # check that nova already knows that the volume is detached
        def server_has_attachment(server_id, attachment_id):
            try:
                server_api.get_server_volume(server_id, attachment_id)
            except Exception as ex:
                nova_plugin.ignore_not_found(ex)
                return False
            return True

        while server_has_attachment(self.server_id, self.attachment_id):
            LOG.info(_LI("Server %(srv)s still has attachment %(att)s."),
                     {'att': self.attachment_id, 'srv': self.server_id})
            yield

        LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
                 {'vol': vol.id, 'srv': self.server_id})

@@ -308,10 +308,13 @@ class VolumeTest(vt_base.BaseVolumeTest):
        self._mock_create_server_volume_script(fva)
        self.stub_VolumeConstraint_validate()
        # delete script
        self.fc.volumes.get_server_volume(u'WikiDatabase',
                                          'vol-123').AndReturn(fva)
        self.fc.volumes.delete_server_volume(u'WikiDatabase',
                                             'vol-123').AndReturn(None)
        self.cinder_fc.volumes.get(fva.id).AndRaise(
            cinder_exp.NotFound('Not found'))
        self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123'
                                          ).AndRaise(
            fakes_nova.fake_exception())

        self.m.ReplayAll()

@@ -333,9 +336,12 @@ class VolumeTest(vt_base.BaseVolumeTest):
        self._mock_create_server_volume_script(fva)
        self.stub_VolumeConstraint_validate()
        # delete script
        self.fc.volumes.get_server_volume(u'WikiDatabase',
                                          'vol-123').AndReturn(fva)
        self.fc.volumes.delete_server_volume(u'WikiDatabase',
                                             'vol-123').AndReturn(None)
        self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
        self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123'
                                          ).AndRaise(
            fakes_nova.fake_exception())

        self.m.ReplayAll()

@@ -395,11 +401,8 @@ class VolumeTest(vt_base.BaseVolumeTest):
        self.stub_VolumeConstraint_validate()
        # delete script
        fva = vt_base.FakeVolume('in-use')
        self.fc.volumes.get_server_volume(u'WikiDatabase',
                                          'vol-123').AndReturn(fva)
        self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
        self.fc.volumes.delete_server_volume(
            'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
            'WikiDatabase', 'vol-123').AndReturn(None)
        self.cinder_fc.volumes.get(fva.id).AndReturn(
            vt_base.FakeVolume('error', id=fva.id))
        self.m.ReplayAll()
@@ -444,13 +447,9 @@ class VolumeTest(vt_base.BaseVolumeTest):
        fv = self._mock_create_volume(vt_base.FakeVolume('creating'),
                                      stack_name)

        self.cinder_fc.volumes.get(fv.id).AndReturn(
            vt_base.FakeVolume('available'))
        self.cinder_fc.volumes.delete(fv.id).AndReturn(True)
        self.cinder_fc.volumes.get(fv.id).AndReturn(
            vt_base.FakeVolume('deleting'))
        self.cinder_fc.volumes.get(fv.id).AndRaise(
            cinder_exp.NotFound('Not found'))

        self.m.ReplayAll()

        stack = utils.parse_stack(self.t, stack_name=stack_name)
@@ -533,8 +532,10 @@ class VolumeTest(vt_base.BaseVolumeTest):

        # snapshot script
        self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
        self.cinder_fc.backups.create(fv.id).AndReturn(
            vt_base.FakeBackup('available'))
        self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
        fb = vt_base.FakeBackup('available')
        self.cinder_fc.backups.create(fv.id).AndReturn(fb)
        self.cinder_fc.backups.get(fb.id).AndReturn(fb)
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self._mock_delete_volume(fv)

@@ -555,10 +556,11 @@ class VolumeTest(vt_base.BaseVolumeTest):
                                      stack_name)

        # snapshot script
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
        self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
        fb = vt_base.FakeBackup('error')
        self.cinder_fc.backups.create(fv.id).AndReturn(fb)
        self.cinder_fc.backups.get(fb.id).AndReturn(fb)
        self.m.ReplayAll()

        self.t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
@@ -337,7 +337,6 @@ class CinderVolumeTest(vt_base.BaseVolumeTest):
        fv = vt_base.FakeVolume('available',
                                size=1, attachments=[])
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.cinder_fc.volumes.extend(fv.id, 2)
        self.cinder_fc.volumes.get(fv.id).AndReturn(
            vt_base.FakeVolume('extending'))
@@ -371,7 +370,6 @@ class CinderVolumeTest(vt_base.BaseVolumeTest):
        fv = vt_base.FakeVolume('available',
                                size=1, attachments=[])
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.cinder_fc.volumes.extend(fv.id, 2).AndRaise(
            cinder_exp.OverLimit(413))
        self.m.ReplayAll()
@@ -400,7 +398,6 @@ class CinderVolumeTest(vt_base.BaseVolumeTest):
        fv = vt_base.FakeVolume('available',
                                size=1, attachments=[])
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
        self.cinder_fc.volumes.extend(fv.id, 2)
        self.cinder_fc.volumes.get(fv.id).AndReturn(
            vt_base.FakeVolume('extending'))