Add volume multi attach support
This patch includes the Cinder changes needed to support volume multiple attaches. Nova and python-cinderclient also need associated patches to provide support for multiple attachments.

This adds the multiattach flag to volumes. When a volume is created, a multiattach flag can be set, which allows a volume to be attached to more than one Nova instance or host. If the multiattach flag is not set on a volume, it cannot be attached to more than one Nova instance or host.

Each volume attachment is tracked in a new volume_attachment table. The attachment id is the unique identifier for each attachment to an instance or host. When a volume is to be detached, the attachment uuid must be passed in to the detach call in order to determine which attachment should be removed. Since a volume can be attached to an instance and a host, the attachment id is used as the attachment identifier.

Nova:
https://review.openstack.org/#/c/153033/
https://review.openstack.org/#/c/153038/

python-cinderclient:
https://review.openstack.org/#/c/85856/

Change-Id: I950fa00ed5a30e7758245d5b0557f6df42dc58a3
Implements: blueprint multi-attach-volume
APIImpact
parent 490f03b48a
commit 10d5421687
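
Before the diff itself, a minimal sketch of how the changed API surface is driven once this patch and its Nova/python-cinderclient counterparts land. The endpoint URL, token, and IDs below are illustrative placeholders; only the request-body shapes ('multiattach' on volume create, 'attachment_id' on the detach actions) are taken from the changes that follow.

    # Sketch only: BASE, the token, and all IDs are hypothetical.
    import json
    import requests

    BASE = 'http://cinder.example.com:8776/v2/myproject'
    HEADERS = {'X-Auth-Token': 'fake-token',
               'Content-Type': 'application/json'}

    # Create a volume that may be attached to more than one instance/host.
    body = {'volume': {'size': 1, 'multiattach': True}}
    resp = requests.post(BASE + '/volumes', headers=HEADERS,
                         data=json.dumps(body))
    vol = resp.json()['volume']

    # Detach one specific attachment; the attachment_id comes from the
    # 'attachments' list the volume views now return.
    detach = {'os-detach': {'attachment_id': 'an-attachment-uuid'}}
    requests.post(BASE + '/volumes/%s/action' % vol['id'],
                  headers=HEADERS, data=json.dumps(detach))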
@@ -185,7 +185,10 @@ class VolumeAdminController(AdminController):
             raise exc.HTTPNotFound()
         self.volume_api.terminate_connection(context, volume,
                                              {}, force=True)
-        self.volume_api.detach(context, volume)
+
+        attachment_id = body['os-force_detach'].get('attachment_id', None)
+
+        self.volume_api.detach(context, volume, attachment_id)
         return webob.Response(status_int=202)

     @wsgi.action('os-migrate_volume')
@@ -127,7 +127,11 @@ class VolumeActionsController(wsgi.Controller):
         except exception.VolumeNotFound as error:
             raise webob.exc.HTTPNotFound(explanation=error.msg)

-        self.volume_api.detach(context, volume)
+        attachment_id = None
+        if body['os-detach']:
+            attachment_id = body['os-detach'].get('attachment_id', None)
+
+        self.volume_api.detach(context, volume, attachment_id)
         return webob.Response(status_int=202)

     @wsgi.action('os-reserve')
@@ -48,18 +48,18 @@ def _translate_attachment_detail_view(_context, vol):

 def _translate_attachment_summary_view(_context, vol):
     """Maps keys for attachment summary view."""
-    d = {}
-    volume_id = vol['id']
-
-    # NOTE(justinsb): We use the volume id as the id of the attachment object
-    d['id'] = volume_id
-
-    d['volume_id'] = volume_id
-    d['server_id'] = vol['instance_uuid']
-    d['host_name'] = vol['attached_host']
-    if vol.get('mountpoint'):
-        d['device'] = vol['mountpoint']
+    d = []
+    attachments = vol.get('volume_attachment', [])
+    for attachment in attachments:
+        if attachment.get('attach_status') == 'attached':
+            a = {'id': attachment.get('volume_id'),
+                 'attachment_id': attachment.get('id'),
+                 'volume_id': attachment.get('volume_id'),
+                 'server_id': attachment.get('instance_uuid'),
+                 'host_name': attachment.get('attached_host'),
+                 'device': attachment.get('mountpoint'),
+                 }
+            d.append(a)

     return d
@@ -91,10 +91,14 @@ def _translate_volume_summary_view(context, vol, image_id=None):
     else:
         d['bootable'] = 'false'

+    if vol['multiattach']:
+        d['multiattach'] = 'true'
+    else:
+        d['multiattach'] = 'false'
+
     d['attachments'] = []
     if vol['attach_status'] == 'attached':
-        attachment = _translate_attachment_detail_view(context, vol)
-        d['attachments'].append(attachment)
+        d['attachments'] = _translate_attachment_detail_view(context, vol)

     d['display_name'] = vol['display_name']
     d['display_description'] = vol['display_description']
@@ -146,6 +150,7 @@ def make_volume(elem):
     elem.set('volume_type')
     elem.set('snapshot_id')
     elem.set('source_volid')
+    elem.set('multiattach')

     attachments = xmlutil.SubTemplateElement(elem, 'attachments')
     attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
@@ -373,6 +378,8 @@ class VolumeController(wsgi.Controller):
             size = kwargs['source_volume']['size']

         LOG.info(_LI("Create volume of %s GB"), size, context=context)
+        multiattach = volume.get('multiattach', False)
+        kwargs['multiattach'] = multiattach

         image_href = None
         image_uuid = None
@@ -70,7 +70,8 @@ class ViewBuilder(common.ViewBuilder):
                 'bootable': str(volume.get('bootable')).lower(),
                 'encrypted': self._is_volume_encrypted(volume),
                 'replication_status': volume.get('replication_status'),
-                'consistencygroup_id': volume.get('consistencygroup_id')
+                'consistencygroup_id': volume.get('consistencygroup_id'),
+                'multiattach': volume.get('multiattach')
             }
         }
@@ -83,19 +84,17 @@ class ViewBuilder(common.ViewBuilder):
         attachments = []

         if volume['attach_status'] == 'attached':
-            d = {}
-            volume_id = volume['id']
-
-            # note(justinsb): we use the volume id as the id of the attachments
-            # object
-            d['id'] = volume_id
-
-            d['volume_id'] = volume_id
-            d['server_id'] = volume['instance_uuid']
-            d['host_name'] = volume['attached_host']
-            if volume.get('mountpoint'):
-                d['device'] = volume['mountpoint']
-            attachments.append(d)
+            attaches = volume.get('volume_attachment', [])
+            for attachment in attaches:
+                if attachment.get('attach_status') == 'attached':
+                    a = {'id': attachment.get('volume_id'),
+                         'attachment_id': attachment.get('id'),
+                         'volume_id': attachment.get('volume_id'),
+                         'server_id': attachment.get('instance_uuid'),
+                         'host_name': attachment.get('attached_host'),
+                         'device': attachment.get('mountpoint'),
+                         }
+                    attachments.append(a)

         return attachments
@@ -44,6 +44,7 @@ SCHEDULER_HINTS_NAMESPACE =\

 def make_attachment(elem):
     elem.set('id')
+    elem.set('attachment_id')
     elem.set('server_id')
     elem.set('host_name')
     elem.set('volume_id')
@@ -63,6 +64,7 @@ def make_volume(elem):
     elem.set('snapshot_id')
     elem.set('source_volid')
     elem.set('consistencygroup_id')
+    elem.set('multiattach')

     attachments = xmlutil.SubTemplateElement(elem, 'attachments')
     attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
@@ -412,6 +414,8 @@ class VolumeController(wsgi.Controller):

         kwargs['availability_zone'] = volume.get('availability_zone', None)
         kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
+        multiattach = volume.get('multiattach', False)
+        kwargs['multiattach'] = multiattach

         new_volume = self.volume_api.create(context,
                                             size,
@@ -197,16 +197,26 @@ class BackupManager(manager.SchedulerDependentManager):
         for volume in volumes:
             volume_host = volume_utils.extract_host(volume['host'], 'backend')
             backend = self._get_volume_backend(host=volume_host)
-            if volume['status'] == 'backing-up':
-                LOG.info(_LI('Resetting volume %s to available '
-                             '(was backing-up).') % volume['id'])
-                mgr = self._get_manager(backend)
-                mgr.detach_volume(ctxt, volume['id'])
-            if volume['status'] == 'restoring-backup':
-                LOG.info(_LI('Resetting volume %s to error_restoring '
-                             '(was restoring-backup).') % volume['id'])
-                mgr = self._get_manager(backend)
-                mgr.detach_volume(ctxt, volume['id'])
-                self.db.volume_update(ctxt, volume['id'],
-                                      {'status': 'error_restoring'})
+            attachments = volume['volume_attachment']
+            if attachments:
+                if volume['status'] == 'backing-up':
+                    LOG.info(_LI('Resetting volume %s to available '
+                                 '(was backing-up).'), volume['id'])
+                    mgr = self._get_manager(backend)
+                    for attachment in attachments:
+                        if (attachment['attached_host'] == self.host and
+                           attachment['instance_uuid'] is None):
+                            mgr.detach_volume(ctxt, volume['id'],
+                                              attachment['id'])
+                if volume['status'] == 'restoring-backup':
+                    LOG.info(_LI('setting volume %s to error_restoring '
+                                 '(was restoring-backup).'), volume['id'])
+                    mgr = self._get_manager(backend)
+                    for attachment in attachments:
+                        if (attachment['attached_host'] == self.host and
+                           attachment['instance_uuid'] is None):
+                            mgr.detach_volume(ctxt, volume['id'],
+                                              attachment['id'])
+                    self.db.volume_update(ctxt, volume['id'],
+                                          {'status': 'error_restoring'})
@@ -136,10 +136,16 @@ def iscsi_target_create_safe(context, values):
 ###############


-def volume_attached(context, volume_id, instance_id, host_name, mountpoint):
+def volume_attach(context, values):
+    """Attach a volume."""
+    return IMPL.volume_attach(context, values)
+
+
+def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
+                    attach_mode='rw'):
     """Ensure that a volume is set as attached."""
     return IMPL.volume_attached(context, volume_id, instance_id, host_name,
-                                mountpoint)
+                                mountpoint, attach_mode)


 def volume_create(context, values):
@@ -169,9 +175,9 @@ def volume_destroy(context, volume_id):
     return IMPL.volume_destroy(context, volume_id)


-def volume_detached(context, volume_id):
+def volume_detached(context, volume_id, attachment_id):
     """Ensure that a volume is set as detached."""
-    return IMPL.volume_detached(context, volume_id)
+    return IMPL.volume_detached(context, volume_id, attachment_id)


 def volume_get(context, volume_id):
@@ -219,6 +225,27 @@ def volume_update(context, volume_id, values):
     return IMPL.volume_update(context, volume_id, values)


+def volume_attachment_update(context, attachment_id, values):
+    return IMPL.volume_attachment_update(context, attachment_id, values)
+
+
+def volume_attachment_get(context, attachment_id, session=None):
+    return IMPL.volume_attachment_get(context, attachment_id, session)
+
+
+def volume_attachment_get_used_by_volume_id(context, volume_id):
+    return IMPL.volume_attachment_get_used_by_volume_id(context, volume_id)
+
+
+def volume_attachment_get_by_host(context, volume_id, host):
+    return IMPL.volume_attachment_get_by_host(context, volume_id, host)
+
+
+def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
+    return IMPL.volume_attachment_get_by_instance_uuid(context, volume_id,
+                                                       instance_uuid)
+
+
 ####################
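
The wrappers above give attachments a three-step lifecycle: volume_attach() creates an attachment record, volume_attached() finalizes it, and volume_detached() removes it by id. A minimal sketch of the intended call sequence, with placeholder IDs and an assumed admin context; this is illustrative, not code from the commit:

    # Illustrative only; the volume id and instance UUID are placeholders.
    from cinder import context, db

    ctxt = context.get_admin_context()
    values = {'volume_id': 'fake-volume-id', 'attach_status': 'attaching'}
    attachment = db.volume_attach(ctxt, values)    # creates the new row
    db.volume_attached(ctxt, attachment['id'],     # marks it attached
                       '3d1f8a5e-0c7a-4f7e-9b2a-7d1d6a1f0e42',  # instance
                       None, '/dev/vdb', attach_mode='rw')
    db.volume_detached(ctxt, 'fake-volume-id', attachment['id'])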
@@ -975,18 +975,51 @@ def reservation_expire(context):


 @require_admin_context
-def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint):
+def volume_attach(context, values):
+    volume_attachment_ref = models.VolumeAttachment()
+    if not values.get('id'):
+        values['id'] = str(uuid.uuid4())
+
+    volume_attachment_ref.update(values)
+    session = get_session()
+    with session.begin():
+        volume_attachment_ref.save(session=session)
+        return volume_attachment_get(context, values['id'],
+                                     session=session)
+
+
+@require_admin_context
+def volume_attached(context, attachment_id, instance_uuid, host_name,
+                    mountpoint, attach_mode='rw'):
+    """This method updates a volume attachment entry.
+
+    This function saves the information related to a particular
+    attachment for a volume. It also updates the volume record
+    to mark the volume as attached.
+
+    """
     if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
         raise exception.InvalidUUID(uuid=instance_uuid)
+
     session = get_session()
     with session.begin():
-        volume_ref = _volume_get(context, volume_id, session=session)
+        volume_attachment_ref = volume_attachment_get(context, attachment_id,
+                                                      session=session)
+
+        volume_attachment_ref['mountpoint'] = mountpoint
+        volume_attachment_ref['attach_status'] = 'attached'
+        volume_attachment_ref['instance_uuid'] = instance_uuid
+        volume_attachment_ref['attached_host'] = host_name
+        volume_attachment_ref['attach_time'] = timeutils.utcnow()
+        volume_attachment_ref['attach_mode'] = attach_mode
+
+        volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
+                                 session=session)
+        volume_attachment_ref.save(session=session)
+
         volume_ref['status'] = 'in-use'
-        volume_ref['mountpoint'] = mountpoint
         volume_ref['attach_status'] = 'attached'
-        volume_ref['instance_uuid'] = instance_uuid
-        volume_ref['attached_host'] = host_name
+        volume_ref.save(session=session)
+
         return volume_ref
@@ -1134,18 +1167,57 @@ def volume_destroy(context, volume_id):


 @require_admin_context
-def volume_detached(context, volume_id):
+def volume_detach(context, attachment_id):
     session = get_session()
     with session.begin():
-        volume_ref = _volume_get(context, volume_id, session=session)
-        # Hide status update from user if we're performing a volume migration
-        if not volume_ref['migration_status']:
-            volume_ref['status'] = 'available'
-        volume_ref['mountpoint'] = None
-        volume_ref['attach_status'] = 'detached'
-        volume_ref['instance_uuid'] = None
-        volume_ref['attached_host'] = None
-        volume_ref['attach_time'] = None
+        volume_attachment_ref = volume_attachment_get(context, attachment_id,
+                                                      session=session)
+        volume_attachment_ref['attach_status'] = 'detaching'
+        volume_attachment_ref.save(session=session)
+
+
+@require_admin_context
+def volume_detached(context, volume_id, attachment_id):
+    """This updates a volume attachment and marks it as detached.
+
+    This method also ensures that the volume entry is correctly
+    marked as either still attached/in-use or detached/available
+    if this was the last detachment made.
+
+    """
+    session = get_session()
+    with session.begin():
+        attachment = volume_attachment_get(context, attachment_id,
+                                           session=session)
+
+        # If this is already detached, attachment will be None
+        if attachment:
+            now = timeutils.utcnow()
+            attachment['attach_status'] = 'detached'
+            attachment['detach_time'] = now
+            attachment['deleted'] = True
+            attachment['deleted_at'] = now
+            attachment.save(session=session)
+
+        attachment_list = volume_attachment_get_used_by_volume_id(
+            context, volume_id, session=session)
+        remain_attachment = False
+        if attachment_list and len(attachment_list) > 0:
+            remain_attachment = True
+
+        volume_ref = _volume_get(context, volume_id, session=session)
+        if not remain_attachment:
+            # Hide status update from user if we're performing volume migration
+            if not volume_ref['migration_status']:
+                volume_ref['status'] = 'available'
+            volume_ref['attach_status'] = 'detached'
+            volume_ref.save(session=session)
+        else:
+            # Volume is still attached
+            volume_ref['status'] = 'in-use'
+            volume_ref['attach_status'] = 'attached'
+            volume_ref.save(session=session)


 @require_context
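
The remain_attachment branch above is the core of multiattach-aware detach: the volume only returns to 'available' when its last live attachment row is removed. Sketched behaviour, with assumed names and placeholder IDs (not test code from this commit):

    # Two attachments on one multiattach volume; detach them one at a time.
    a1 = volume_attach(ctxt, {'volume_id': vol_id,
                              'attach_status': 'attaching'})
    a2 = volume_attach(ctxt, {'volume_id': vol_id,
                              'attach_status': 'attaching'})
    volume_attached(ctxt, a1['id'], instance_uuid_1, None, '/dev/vdb')
    volume_attached(ctxt, a2['id'], instance_uuid_2, None, '/dev/vdc')

    volume_detached(ctxt, vol_id, a1['id'])
    # a2 remains -> volume stays status='in-use', attach_status='attached'
    volume_detached(ctxt, vol_id, a2['id'])
    # nothing remains -> status='available', attach_status='detached'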
@@ -1156,12 +1228,14 @@ def _volume_get_query(context, session=None, project_only=False):
             options(joinedload('volume_metadata')).\
             options(joinedload('volume_admin_metadata')).\
             options(joinedload('volume_type')).\
+            options(joinedload('volume_attachment')).\
             options(joinedload('consistencygroup'))
     else:
         return model_query(context, models.Volume, session=session,
                            project_only=project_only).\
             options(joinedload('volume_metadata')).\
             options(joinedload('volume_type')).\
+            options(joinedload('volume_attachment')).\
             options(joinedload('consistencygroup'))
@@ -1177,6 +1251,54 @@ def _volume_get(context, volume_id, session=None):
     return result


+@require_context
+def volume_attachment_get(context, attachment_id, session=None):
+    result = model_query(context, models.VolumeAttachment,
+                         session=session).\
+        filter_by(id=attachment_id).\
+        first()
+    if not result:
+        raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' %
+                                                 attachment_id)
+    return result
+
+
+@require_context
+def volume_attachment_get_used_by_volume_id(context, volume_id, session=None):
+    result = model_query(context, models.VolumeAttachment,
+                         session=session).\
+        filter_by(volume_id=volume_id).\
+        filter(models.VolumeAttachment.attach_status != 'detached').\
+        all()
+    return result
+
+
+@require_context
+def volume_attachment_get_by_host(context, volume_id, host):
+    session = get_session()
+    with session.begin():
+        result = model_query(context, models.VolumeAttachment,
+                             session=session).\
+            filter_by(volume_id=volume_id).\
+            filter_by(attached_host=host).\
+            filter(models.VolumeAttachment.attach_status != 'detached').\
+            first()
+        return result
+
+
+@require_context
+def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
+    session = get_session()
+    with session.begin():
+        result = model_query(context, models.VolumeAttachment,
+                             session=session).\
+            filter_by(volume_id=volume_id).\
+            filter_by(instance_uuid=instance_uuid).\
+            filter(models.VolumeAttachment.attach_status != 'detached').\
+            first()
+        return result
+
+
 @require_context
 def volume_get(context, volume_id):
     return _volume_get(context, volume_id)
@@ -1544,6 +1666,17 @@ def volume_update(context, volume_id, values):
     return volume_ref


+@require_context
+def volume_attachment_update(context, attachment_id, values):
+    session = get_session()
+    with session.begin():
+        volume_attachment_ref = volume_attachment_get(context, attachment_id,
+                                                      session=session)
+        volume_attachment_ref.update(values)
+        volume_attachment_ref.save(session=session)
+        return volume_attachment_ref
+
+
 ####################


 def _volume_x_metadata_get_query(context, volume_id, model, session=None):
@@ -0,0 +1,147 @@
+# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+import uuid
+
+import six
+from sqlalchemy import Boolean, Column, DateTime
+from sqlalchemy import ForeignKey, MetaData, String, Table
+
+from cinder.i18n import _LE
+from cinder.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+CREATED_AT = datetime.datetime.now()
+
+
+def upgrade(migrate_engine):
+    """Add volume multi attachment table."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # add the multiattach flag to the volumes table.
+    volumes = Table('volumes', meta, autoload=True)
+    multiattach = Column('multiattach', Boolean)
+    volumes.create_column(multiattach)
+    volumes.update().values(multiattach=False).execute()
+
+    # The new volume_attachment table
+    volume_attachment = Table(
+        'volume_attachment', meta,
+        Column('created_at', DateTime),
+        Column('updated_at', DateTime),
+        Column('deleted_at', DateTime),
+        Column('deleted', Boolean),
+        Column('id', String(length=36), primary_key=True, nullable=False),
+        Column('volume_id', String(length=36), ForeignKey('volumes.id'),
+               nullable=False),
+        Column('attached_host', String(length=255)),
+        Column('instance_uuid', String(length=36)),
+        Column('mountpoint', String(length=255)),
+        Column('attach_time', DateTime),
+        Column('detach_time', DateTime),
+        Column('attach_mode', String(length=36)),
+        Column('attach_status', String(length=255)),
+        mysql_engine='InnoDB'
+    )
+
+    try:
+        volume_attachment.create()
+    except Exception:
+        LOG.error(_LE("Table volume_attachment not created!"))
+        raise
+
+    # now migrate existing volume attachment info into the
+    # new volume_attachment table
+    volumes_list = list(volumes.select().execute())
+    for volume in volumes_list:
+        if volume.attach_status == 'attached':
+            attachment = volume_attachment.insert()
+            values = {'id': six.text_type(uuid.uuid4()),
+                      'created_at': CREATED_AT,
+                      'deleted_at': None,
+                      'deleted': 0,
+                      'volume_id': volume.id,
+                      'attached_host': volume.host,
+                      'instance_uuid': volume.instance_uuid,
+                      'mountpoint': volume.mountpoint,
+                      'attach_time': volume.attach_time,
+                      'attach_mode': 'rw',
+                      'attach_status': 'attached',
+                      }
+            attachment.execute(values)
+
+    # we have no reason to keep the columns that now
+    # exist in the volume_attachment table
+    mountpoint = volumes.columns.mountpoint
+    volumes.drop_column(mountpoint)
+    instance_uuid = volumes.columns.instance_uuid
+    volumes.drop_column(instance_uuid)
+    attach_time = volumes.columns.attach_time
+    volumes.drop_column(attach_time)
+    attached_host = volumes.columns.attached_host
+    volumes.drop_column(attached_host)
+
+
+def downgrade(migrate_engine):
+    """Remove volume_attachment table."""
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    # Put the needed volumes table columns back
+    volumes = Table('volumes', meta, autoload=True)
+    multiattach = volumes.columns.multiattach
+    volumes.drop_column(multiattach)
+
+    attached_host = Column('attached_host', String(length=255))
+    volumes.create_column(attached_host)
+    volumes.update().values(attached_host=None).execute()
+
+    attach_time = Column('attach_time', String(length=255))
+    volumes.create_column(attach_time)
+    volumes.update().values(attach_time=None).execute()
+
+    instance_uuid = Column('instance_uuid', String(length=36))
+    volumes.create_column(instance_uuid)
+    volumes.update().values(instance_uuid=None).execute()
+
+    mountpoint = Column('mountpoint', String(length=255))
+    volumes.create_column(mountpoint)
+    volumes.update().values(mountpoint=None).execute()
+
+    volume_attachment = Table('volume_attachment', meta, autoload=True)
+    attachments = list(volume_attachment.select().execute())
+    for attachment in attachments:
+        # we are going to lose data here for
+        # multiple attaches. We'll migrate and the
+        # last update wins.
+
+        if not attachment.deleted_at:
+            volume_id = attachment.volume_id
+            volumes.update().\
+                where(volumes.c.id == volume_id).\
+                values(mountpoint=attachment.mountpoint,
+                       attached_host=attachment.attached_host,
+                       attach_time=attachment.attach_time,
+                       instance_uuid=attachment.instance_uuid).\
+                execute()
+    try:
+        volume_attachment.drop()
+    except Exception:
+        LOG.error(_LE("Dropping volume_attachment table failed."))
+        raise
@@ -0,0 +1,87 @@
+BEGIN TRANSACTION;
+
+CREATE TABLE volumes_v39 (
+    created_at DATETIME,
+    updated_at DATETIME,
+    deleted_at DATETIME,
+    deleted BOOLEAN,
+    id VARCHAR(36) NOT NULL,
+    ec2_id INTEGER,
+    user_id VARCHAR(255),
+    project_id VARCHAR(255),
+    snapshot_id VARCHAR(36),
+    host VARCHAR(255),
+    size INTEGER,
+    availability_zone VARCHAR(255),
+    status VARCHAR(255),
+    attach_status VARCHAR(255),
+    scheduled_at DATETIME,
+    launched_at DATETIME,
+    terminated_at DATETIME,
+    display_name VARCHAR(255),
+    display_description VARCHAR(255),
+    provider_location VARCHAR(255),
+    provider_auth VARCHAR(255),
+    volume_type_id VARCHAR(36),
+    source_volid VARCHAR(36),
+    bootable INTEGER,
+    provider_geometry VARCHAR(255),
+    _name_id VARCHAR(36),
+    encryption_key_id VARCHAR(36),
+    migration_status VARCHAR(255),
+    attached_host VARCHAR(255),
+    attach_time VARCHAR(255),
+    instance_uuid VARCHAR(36),
+    mountpoint VARCHAR(255),
+    consistencygroup_id VARCHAR(36),
+    replication_status VARCHAR(255),
+    replication_extended_status VARCHAR(255),
+    replication_driver_data VARCHAR(255),
+    PRIMARY KEY (id)
+);
+
+INSERT INTO volumes_v39
+    SELECT volumes.created_at,
+           volumes.updated_at,
+           volumes.deleted_at,
+           volumes.deleted,
+           volumes.id,
+           volumes.ec2_id,
+           volumes.user_id,
+           volumes.project_id,
+           volumes.snapshot_id,
+           volumes.host,
+           volumes.size,
+           volumes.availability_zone,
+           volumes.status,
+           volumes.attach_status,
+           volumes.scheduled_at,
+           volumes.launched_at,
+           volumes.terminated_at,
+           volumes.display_name,
+           volumes.display_description,
+           volumes.provider_location,
+           volumes.provider_auth,
+           volumes.volume_type_id,
+           volumes.source_volid,
+           volumes.bootable,
+           volumes.provider_geometry,
+           volumes._name_id,
+           volumes.encryption_key_id,
+           volumes.migration_status,
+           volume_attachment.attached_host,
+           volume_attachment.attach_time,
+           volume_attachment.instance_uuid,
+           volume_attachment.mountpoint,
+           volumes.consistencygroup_id,
+           volumes.replication_status,
+           volumes.replication_extended_status,
+           volumes.replication_driver_data
+    FROM volumes
+    LEFT OUTER JOIN volume_attachment
+    ON volumes.id=volume_attachment.volume_id;
+
+DROP TABLE volumes;
+ALTER TABLE volumes_v39 RENAME TO volumes;
+DROP TABLE volume_attachment;
+COMMIT;
@@ -129,10 +129,6 @@ class Volume(BASE, CinderBase):
     host = Column(String(255))  # , ForeignKey('hosts.id'))
     size = Column(Integer)
     availability_zone = Column(String(255))  # TODO(vish): foreign key?
-    instance_uuid = Column(String(36))
-    attached_host = Column(String(255))
-    mountpoint = Column(String(255))
-    attach_time = Column(String(255))  # TODO(vish): datetime
     status = Column(String(255))  # TODO(vish): enum?
     attach_status = Column(String(255))  # TODO(vish): enum
     migration_status = Column(String(255))
@@ -157,6 +153,7 @@ class Volume(BASE, CinderBase):

     deleted = Column(Boolean, default=False)
     bootable = Column(Boolean, default=False)
+    multiattach = Column(Boolean, default=False)

     replication_status = Column(String(255))
     replication_extended_status = Column(String(255))
@@ -197,6 +194,26 @@ class VolumeAdminMetadata(BASE, CinderBase):
                            'VolumeAdminMetadata.deleted == False)')


+class VolumeAttachment(BASE, CinderBase):
+    """Represents a volume attachment for a vm."""
+    __tablename__ = 'volume_attachment'
+    id = Column(String(36), primary_key=True)
+
+    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
+    volume = relationship(Volume, backref="volume_attachment",
+                          foreign_keys=volume_id,
+                          primaryjoin='and_('
+                          'VolumeAttachment.volume_id == Volume.id,'
+                          'VolumeAttachment.deleted == False)')
+    instance_uuid = Column(String(36))
+    attached_host = Column(String(255))
+    mountpoint = Column(String(255))
+    attach_time = Column(DateTime)
+    detach_time = Column(DateTime)
+    attach_status = Column(String(255))
+    attach_mode = Column(String(255))
+
+
 class VolumeTypes(BASE, CinderBase):
     """Represent possible volume_types of volumes offered."""
     __tablename__ = "volume_types"
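
Because the model declares backref="volume_attachment" on Volume, each loaded volume carries its attachment rows, which is how the API views and the backup manager above iterate them. A small illustrative sketch (the context and volume id are placeholders):

    # Illustrative: read attachments through the new relationship.
    volume = db.volume_get(ctxt, 'fake-volume-id')  # query joins attachments
    for attachment in volume['volume_attachment']:
        print(attachment['instance_uuid'], attachment['attached_host'],
              attachment['mountpoint'], attachment['attach_mode'])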
@@ -576,6 +593,7 @@ def register_models():
               Volume,
               VolumeMetadata,
               VolumeAdminMetadata,
+              VolumeAttachment,
               SnapshotMetadata,
               Transfer,
               VolumeTypeExtraSpecs,
@@ -252,6 +252,11 @@ class VolumeNotFound(NotFound):
     message = _("Volume %(volume_id)s could not be found.")


+class VolumeAttachmentNotFound(NotFound):
+    message = _("Volume attachment could not be found with "
+                "filter: %(filter)s .")
+
+
 class VolumeMetadataNotFound(NotFound):
     message = _("Volume %(volume_id)s has no metadata with "
                 "key %(metadata_key)s.")
@@ -45,10 +45,6 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
         'host': fields.StringField(nullable=True),
         'size': fields.IntegerField(),
         'availability_zone': fields.StringField(),
-        'instance_uuid': fields.UUIDField(nullable=True),
-        'attached_host': fields.StringField(nullable=True),
-        'mountpoint': fields.StringField(nullable=True),
-        'attach_time': fields.StringField(nullable=True),
         'status': fields.StringField(),
         'attach_status': fields.StringField(),
         'migration_status': fields.StringField(nullable=True),
@@ -391,15 +391,14 @@ class AdminActionsTest(test.TestCase):
         svc = self.start_service('volume', host='test')
         self.volume_api.reserve_volume(ctx, volume)
         mountpoint = '/dev/vbd'
-        self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
-                               mountpoint, 'rw')
+        attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
+                                            None, mountpoint, 'rw')
         # volume is attached
         volume = db.volume_get(ctx, volume['id'])
         self.assertEqual(volume['status'], 'in-use')
-        self.assertEqual(volume['instance_uuid'], stubs.FAKE_UUID)
-        self.assertIsNone(volume['attached_host'])
-        self.assertEqual(volume['mountpoint'], mountpoint)
-        self.assertEqual(volume['attach_status'], 'attached')
+        self.assertEqual(attachment['instance_uuid'], stubs.FAKE_UUID)
+        self.assertEqual(attachment['mountpoint'], mountpoint)
+        self.assertEqual(attachment['attach_status'], 'attached')
         admin_metadata = volume['volume_admin_metadata']
         self.assertEqual(len(admin_metadata), 2)
         self.assertEqual(admin_metadata[0]['key'], 'readonly')
@@ -415,7 +414,8 @@ class AdminActionsTest(test.TestCase):
         req.method = 'POST'
         req.headers['content-type'] = 'application/json'
         # request status of 'error'
-        req.body = jsonutils.dumps({'os-force_detach': None})
+        req.body = jsonutils.dumps({'os-force_detach':
+                                    {'attachment_id': attachment['id']}})
         # attach admin context to request
         req.environ['cinder.context'] = ctx
         # make request
@@ -423,12 +423,12 @@ class AdminActionsTest(test.TestCase):
         # request is accepted
         self.assertEqual(resp.status_int, 202)
         volume = db.volume_get(ctx, volume['id'])
+        self.assertRaises(exception.VolumeAttachmentNotFound,
+                          db.volume_attachment_get,
+                          ctx, attachment['id'])
+
         # status changed to 'available'
         self.assertEqual(volume['status'], 'available')
-        self.assertIsNone(volume['instance_uuid'])
-        self.assertIsNone(volume['attached_host'])
-        self.assertIsNone(volume['mountpoint'])
-        self.assertEqual(volume['attach_status'], 'detached')
         admin_metadata = volume['volume_admin_metadata']
         self.assertEqual(len(admin_metadata), 1)
         self.assertEqual(admin_metadata[0]['key'], 'readonly')
@@ -445,17 +445,18 @@ class AdminActionsTest(test.TestCase):
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
-        self.volume_api.reserve_volume(ctx, volume)
+        self.volume_api.initialize_connection(ctx, volume, connector)
         mountpoint = '/dev/vbd'
         host_name = 'fake-host'
-        self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'ro')
+        attachment = self.volume_api.attach(ctx, volume, None, host_name,
+                                            mountpoint, 'ro')
         # volume is attached
         volume = db.volume_get(ctx, volume['id'])
         self.assertEqual(volume['status'], 'in-use')
-        self.assertIsNone(volume['instance_uuid'])
-        self.assertEqual(volume['attached_host'], host_name)
-        self.assertEqual(volume['mountpoint'], mountpoint)
-        self.assertEqual(volume['attach_status'], 'attached')
+        self.assertIsNone(attachment['instance_uuid'])
+        self.assertEqual(attachment['attached_host'], host_name)
+        self.assertEqual(attachment['mountpoint'], mountpoint)
+        self.assertEqual(attachment['attach_status'], 'attached')
         admin_metadata = volume['volume_admin_metadata']
         self.assertEqual(len(admin_metadata), 2)
         self.assertEqual(admin_metadata[0]['key'], 'readonly')
@@ -470,7 +471,8 @@ class AdminActionsTest(test.TestCase):
         req.method = 'POST'
         req.headers['content-type'] = 'application/json'
         # request status of 'error'
-        req.body = jsonutils.dumps({'os-force_detach': None})
+        req.body = jsonutils.dumps({'os-force_detach':
+                                    {'attachment_id': attachment['id']}})
         # attach admin context to request
         req.environ['cinder.context'] = ctx
         # make request
@@ -478,12 +480,11 @@ class AdminActionsTest(test.TestCase):
         # request is accepted
         self.assertEqual(resp.status_int, 202)
         volume = db.volume_get(ctx, volume['id'])
+        self.assertRaises(exception.VolumeAttachmentNotFound,
+                          db.volume_attachment_get,
+                          ctx, attachment['id'])
         # status changed to 'available'
         self.assertEqual(volume['status'], 'available')
-        self.assertIsNone(volume['instance_uuid'])
-        self.assertIsNone(volume['attached_host'])
-        self.assertIsNone(volume['mountpoint'])
-        self.assertEqual(volume['attach_status'], 'detached')
         admin_metadata = volume['volume_admin_metadata']
         self.assertEqual(len(admin_metadata), 1)
         self.assertEqual(admin_metadata[0]['key'], 'readonly')
@@ -502,11 +503,10 @@ class AdminActionsTest(test.TestCase):
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
         self.volume_api.reserve_volume(ctx, volume)
-        mountpoint = '/dev/vbd'
-        self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
-                               mountpoint, 'rw')
         conn_info = self.volume_api.initialize_connection(ctx,
                                                           volume, connector)
+        self.volume_api.attach(ctx, volume, fakes.get_fake_uuid(), None,
+                               '/dev/vbd0', 'rw')
         self.assertEqual(conn_info['data']['access_mode'], 'rw')
         self.assertRaises(exception.InvalidVolume,
                           self.volume_api.attach,
@@ -514,15 +514,7 @@ class AdminActionsTest(test.TestCase):
                           volume,
                           fakes.get_fake_uuid(),
                           None,
-                          mountpoint,
-                          'rw')
-        self.assertRaises(exception.InvalidVolume,
-                          self.volume_api.attach,
-                          ctx,
-                          volume,
-                          fakes.get_fake_uuid(),
-                          None,
-                          mountpoint,
+                          '/dev/vdb1',
                           'ro')
         # cleanup
         svc.stop()
@@ -538,9 +530,9 @@ class AdminActionsTest(test.TestCase):
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
         self.volume_api.reserve_volume(ctx, volume)
-        mountpoint = '/dev/vbd'
-        host_name = 'fake_host'
-        self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'rw')
+        self.volume_api.initialize_connection(ctx, volume, connector)
+        self.volume_api.attach(ctx, volume, None, 'fake_host1',
+                               '/dev/vbd0', 'rw')
         conn_info = self.volume_api.initialize_connection(ctx,
                                                           volume, connector)
         conn_info['data']['access_mode'] = 'rw'
@@ -549,16 +541,8 @@ class AdminActionsTest(test.TestCase):
                           ctx,
                           volume,
                           None,
-                          host_name,
-                          mountpoint,
-                          'rw')
-        self.assertRaises(exception.InvalidVolume,
-                          self.volume_api.attach,
-                          ctx,
-                          volume,
-                          None,
-                          host_name,
-                          mountpoint,
+                          'fake_host2',
+                          '/dev/vbd1',
                           'ro')
         # cleanup
         svc.stop()
@@ -587,19 +571,23 @@ class AdminActionsTest(test.TestCase):
                                         'provider_location': '', 'size': 1})
         # start service to handle rpc messages for attach requests
         svc = self.start_service('volume', host='test')
-        values = {'status': 'attaching',
-                  'instance_uuid': fakes.get_fake_uuid()}
-        db.volume_update(ctx, volume['id'], values)
+        self.volume_api.reserve_volume(ctx, volume)
+        values = {'volume_id': volume['id'],
+                  'attach_status': 'attaching',
+                  'attach_time': timeutils.utcnow(),
+                  'instance_uuid': 'abc123',
+                  }
+        db.volume_attach(ctx, values)
+        db.volume_admin_metadata_update(ctx, volume['id'],
+                                        {"attached_mode": 'rw'}, False)
         mountpoint = '/dev/vbd'
-        self.assertRaises(exception.InvalidVolume,
-                          self.volume_api.attach,
-                          ctx,
-                          volume,
-                          stubs.FAKE_UUID,
-                          None,
-                          mountpoint,
-                          'rw')
-        # cleanup
+        attachment = self.volume_api.attach(ctx, volume,
+                                            stubs.FAKE_UUID, None,
+                                            mountpoint, 'rw')
+
+        self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
+        self.assertEqual(volume['id'], attachment['volume_id'], volume['id'])
+        self.assertEqual('attached', attachment['attach_status'])
         svc.stop()

     def test_attach_attaching_volume_with_different_mode(self):
@@ -37,7 +37,7 @@ CONF = cfg.CONF

 class VolumeActionsTest(test.TestCase):

-    _actions = ('os-detach', 'os-reserve', 'os-unreserve')
+    _actions = ('os-reserve', 'os-unreserve')

     _methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume')
@@ -179,6 +179,16 @@ class VolumeActionsTest(test.TestCase):
             res = req.get_response(fakes.wsgi_app())
             self.assertEqual(res.status_int, 202)

+    def test_detach(self):
+        body = {'os-detach': {'attachment_id': 'fakeuuid'}}
+        req = webob.Request.blank('/v2/fake/volumes/1/action')
+        req.method = "POST"
+        req.body = jsonutils.dumps(body)
+        req.headers["content-type"] = "application/json"
+
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(202, res.status_int)
+
     def test_attach_with_invalid_arguments(self):
         # Invalid request to attach volume an invalid target
         body = {'os-attach': {'mountpoint': '/dev/vdc'}}
|
@ -29,9 +29,6 @@ def stub_volume(id, **kwargs):
|
|||||||
'host': 'fakehost',
|
'host': 'fakehost',
|
||||||
'size': 1,
|
'size': 1,
|
||||||
'availability_zone': 'fakeaz',
|
'availability_zone': 'fakeaz',
|
||||||
'instance_uuid': 'fakeuuid',
|
|
||||||
'attached_host': None,
|
|
||||||
'mountpoint': '/',
|
|
||||||
'attached_mode': 'rw',
|
'attached_mode': 'rw',
|
||||||
'status': 'fakestatus',
|
'status': 'fakestatus',
|
||||||
'migration_status': None,
|
'migration_status': None,
|
||||||
@@ -46,6 +43,8 @@ def stub_volume(id, **kwargs):
              'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
              'volume_metadata': [],
              'volume_type': {'name': 'vol_type_name'},
+             'volume_attachment': [],
+             'multiattach': False,
              'readonly': 'False'}

     volume.update(kwargs)
@@ -85,11 +85,8 @@ class VolumeApiTest(test.TestCase):
                                'availability_zone': 'zone1:host1',
                                'display_name': 'Volume Test Name',
                                'encrypted': False,
-                               'attachments': [{'device': '/',
-                                                'server_id': 'fakeuuid',
-                                                'host_name': None,
-                                                'id': '1',
-                                                'volume_id': '1'}],
+                               'attachments': [],
+                               'multiattach': 'false',
                                'bootable': 'false',
                                'volume_type': 'vol_type_name',
                                'snapshot_id': None,
@@ -176,11 +173,8 @@ class VolumeApiTest(test.TestCase):
                     'availability_zone': 'nova',
                     'display_name': 'Volume Test Name',
                     'encrypted': False,
-                    'attachments': [{'device': '/',
-                                     'server_id': 'fakeuuid',
-                                     'host_name': None,
-                                     'id': '1',
-                                     'volume_id': '1'}],
+                    'attachments': [],
+                    'multiattach': 'false',
                     'bootable': 'false',
                     'volume_type': 'vol_type_name',
                     'image_id': test_id,
@@ -258,13 +252,8 @@ class VolumeApiTest(test.TestCase):
             'availability_zone': 'fakeaz',
             'display_name': 'Updated Test Name',
             'encrypted': False,
-            'attachments': [{
-                'id': '1',
-                'volume_id': '1',
-                'server_id': 'fakeuuid',
-                'host_name': None,
-                'device': '/'
-            }],
+            'attachments': [],
+            'multiattach': 'false',
             'bootable': 'false',
             'volume_type': 'vol_type_name',
             'snapshot_id': None,
@@ -294,13 +283,8 @@ class VolumeApiTest(test.TestCase):
             'availability_zone': 'fakeaz',
             'display_name': 'displayname',
             'encrypted': False,
-            'attachments': [{
-                'id': '1',
-                'volume_id': '1',
-                'server_id': 'fakeuuid',
-                'host_name': None,
-                'device': '/'
-            }],
+            'attachments': [],
+            'multiattach': 'false',
             'bootable': 'false',
             'volume_type': 'vol_type_name',
             'snapshot_id': None,
@@ -328,6 +312,10 @@ class VolumeApiTest(test.TestCase):
                                         {"readonly": "True",
                                          "invisible_key": "invisible_value"},
                                         False)
+        values = {'volume_id': '1', }
+        attachment = db.volume_attach(context.get_admin_context(), values)
+        db.volume_attached(context.get_admin_context(),
+                           attachment['id'], stubs.FAKE_UUID, None, '/')

         updates = {
             "display_name": "Updated Test Name",
@@ -339,18 +327,20 @@ class VolumeApiTest(test.TestCase):
         req.environ['cinder.context'] = admin_ctx
         res_dict = self.controller.update(req, '1', body)
         expected = {'volume': {
-            'status': 'fakestatus',
+            'status': 'in-use',
             'display_description': 'displaydesc',
             'availability_zone': 'fakeaz',
             'display_name': 'Updated Test Name',
             'encrypted': False,
             'attachments': [{
+                'attachment_id': attachment['id'],
                 'id': '1',
                 'volume_id': '1',
-                'server_id': 'fakeuuid',
+                'server_id': stubs.FAKE_UUID,
                 'host_name': None,
                 'device': '/'
             }],
+            'multiattach': 'false',
             'bootable': 'false',
             'volume_type': None,
             'snapshot_id': None,
@@ -400,11 +390,8 @@ class VolumeApiTest(test.TestCase):
                     'availability_zone': 'fakeaz',
                     'display_name': 'displayname',
                     'encrypted': False,
-                    'attachments': [{'device': '/',
-                                     'server_id': 'fakeuuid',
-                                     'host_name': None,
-                                     'id': '1',
-                                     'volume_id': '1'}],
+                    'attachments': [],
+                    'multiattach': 'false',
                     'bootable': 'false',
                     'volume_type': 'vol_type_name',
                     'snapshot_id': None,
@@ -430,21 +417,28 @@ class VolumeApiTest(test.TestCase):
                                         {"readonly": "True",
                                          "invisible_key": "invisible_value"},
                                         False)
+        values = {'volume_id': '1', }
+        attachment = db.volume_attach(context.get_admin_context(), values)
+        db.volume_attached(context.get_admin_context(),
+                           attachment['id'], stubs.FAKE_UUID, None, '/')
+
         req = fakes.HTTPRequest.blank('/v1/volumes')
         admin_ctx = context.RequestContext('admin', 'fakeproject', True)
         req.environ['cinder.context'] = admin_ctx
         res_dict = self.controller.index(req)
-        expected = {'volumes': [{'status': 'fakestatus',
+        expected = {'volumes': [{'status': 'in-use',
                                  'display_description': 'displaydesc',
                                  'availability_zone': 'fakeaz',
                                  'display_name': 'displayname',
                                  'encrypted': False,
-                                 'attachments': [{'device': '/',
-                                                  'server_id': 'fakeuuid',
-                                                  'host_name': None,
-                                                  'id': '1',
-                                                  'volume_id': '1'}],
+                                 'attachments': [
+                                     {'attachment_id': attachment['id'],
+                                      'device': '/',
+                                      'server_id': stubs.FAKE_UUID,
+                                      'host_name': None,
+                                      'id': '1',
+                                      'volume_id': '1'}],
+                                 'multiattach': 'false',
                                  'bootable': 'false',
                                  'volume_type': None,
                                  'snapshot_id': None,
@ -469,11 +463,8 @@ class VolumeApiTest(test.TestCase):
|
|||||||
'availability_zone': 'fakeaz',
|
'availability_zone': 'fakeaz',
|
||||||
'display_name': 'displayname',
|
'display_name': 'displayname',
|
||||||
'encrypted': False,
|
'encrypted': False,
|
||||||
'attachments': [{'device': '/',
|
'attachments': [],
|
||||||
'server_id': 'fakeuuid',
|
'multiattach': 'false',
|
||||||
'host_name': None,
|
|
||||||
'id': '1',
|
|
||||||
'volume_id': '1'}],
|
|
||||||
'bootable': 'false',
|
'bootable': 'false',
|
||||||
'volume_type': 'vol_type_name',
|
'volume_type': 'vol_type_name',
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
@ -499,21 +490,28 @@ class VolumeApiTest(test.TestCase):
|
|||||||
{"readonly": "True",
|
{"readonly": "True",
|
||||||
"invisible_key": "invisible_value"},
|
"invisible_key": "invisible_value"},
|
||||||
False)
|
False)
|
||||||
|
values = {'volume_id': '1', }
|
||||||
|
attachment = db.volume_attach(context.get_admin_context(), values)
|
||||||
|
db.volume_attached(context.get_admin_context(),
|
||||||
|
attachment['id'], stubs.FAKE_UUID, None, '/')
|
||||||
|
|
||||||
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
|
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
|
||||||
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
|
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
|
||||||
req.environ['cinder.context'] = admin_ctx
|
req.environ['cinder.context'] = admin_ctx
|
||||||
res_dict = self.controller.index(req)
|
res_dict = self.controller.index(req)
|
||||||
expected = {'volumes': [{'status': 'fakestatus',
|
expected = {'volumes': [{'status': 'in-use',
|
||||||
'display_description': 'displaydesc',
|
'display_description': 'displaydesc',
|
||||||
'availability_zone': 'fakeaz',
|
'availability_zone': 'fakeaz',
|
||||||
'display_name': 'displayname',
|
'display_name': 'displayname',
|
||||||
'encrypted': False,
|
'encrypted': False,
|
||||||
'attachments': [{'device': '/',
|
'attachments': [
|
||||||
'server_id': 'fakeuuid',
|
{'attachment_id': attachment['id'],
|
||||||
|
'device': '/',
|
||||||
|
'server_id': stubs.FAKE_UUID,
|
||||||
'host_name': None,
|
'host_name': None,
|
||||||
'id': '1',
|
'id': '1',
|
||||||
'volume_id': '1'}],
|
'volume_id': '1'}],
|
||||||
|
'multiattach': 'false',
|
||||||
'bootable': 'false',
|
'bootable': 'false',
|
||||||
'volume_type': None,
|
'volume_type': None,
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
@ -536,11 +534,8 @@ class VolumeApiTest(test.TestCase):
|
|||||||
'availability_zone': 'fakeaz',
|
'availability_zone': 'fakeaz',
|
||||||
'display_name': 'displayname',
|
'display_name': 'displayname',
|
||||||
'encrypted': False,
|
'encrypted': False,
|
||||||
'attachments': [{'device': '/',
|
'attachments': [],
|
||||||
'server_id': 'fakeuuid',
|
'multiattach': 'false',
|
||||||
'host_name': None,
|
|
||||||
'id': '1',
|
|
||||||
'volume_id': '1'}],
|
|
||||||
'bootable': 'false',
|
'bootable': 'false',
|
||||||
'volume_type': 'vol_type_name',
|
'volume_type': 'vol_type_name',
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
@ -569,6 +564,7 @@ class VolumeApiTest(test.TestCase):
|
|||||||
'display_name': 'displayname',
|
'display_name': 'displayname',
|
||||||
'encrypted': False,
|
'encrypted': False,
|
||||||
'attachments': [],
|
'attachments': [],
|
||||||
|
'multiattach': 'false',
|
||||||
'bootable': 'false',
|
'bootable': 'false',
|
||||||
'volume_type': 'vol_type_name',
|
'volume_type': 'vol_type_name',
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
@ -594,11 +590,8 @@ class VolumeApiTest(test.TestCase):
|
|||||||
'availability_zone': 'fakeaz',
|
'availability_zone': 'fakeaz',
|
||||||
'display_name': 'displayname',
|
'display_name': 'displayname',
|
||||||
'encrypted': False,
|
'encrypted': False,
|
||||||
'attachments': [{'device': '/',
|
'attachments': [],
|
||||||
'server_id': 'fakeuuid',
|
'multiattach': 'false',
|
||||||
'host_name': None,
|
|
||||||
'id': '1',
|
|
||||||
'volume_id': '1'}],
|
|
||||||
'bootable': 'true',
|
'bootable': 'true',
|
||||||
'volume_type': 'vol_type_name',
|
'volume_type': 'vol_type_name',
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
@ -661,21 +654,28 @@ class VolumeApiTest(test.TestCase):
|
|||||||
{"readonly": "True",
|
{"readonly": "True",
|
||||||
"invisible_key": "invisible_value"},
|
"invisible_key": "invisible_value"},
|
||||||
False)
|
False)
|
||||||
|
values = {'volume_id': '1', }
|
||||||
|
attachment = db.volume_attach(context.get_admin_context(), values)
|
||||||
|
db.volume_attached(context.get_admin_context(),
|
||||||
|
attachment['id'], stubs.FAKE_UUID, None, '/')
|
||||||
|
|
||||||
req = fakes.HTTPRequest.blank('/v1/volumes/1')
|
req = fakes.HTTPRequest.blank('/v1/volumes/1')
|
||||||
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
|
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
|
||||||
req.environ['cinder.context'] = admin_ctx
|
req.environ['cinder.context'] = admin_ctx
|
||||||
res_dict = self.controller.show(req, '1')
|
res_dict = self.controller.show(req, '1')
|
||||||
expected = {'volume': {'status': 'fakestatus',
|
expected = {'volume': {'status': 'in-use',
|
||||||
'display_description': 'displaydesc',
|
'display_description': 'displaydesc',
|
||||||
'availability_zone': 'fakeaz',
|
'availability_zone': 'fakeaz',
|
||||||
'display_name': 'displayname',
|
'display_name': 'displayname',
|
||||||
'encrypted': False,
|
'encrypted': False,
|
||||||
'attachments': [{'device': '/',
|
'attachments': [
|
||||||
'server_id': 'fakeuuid',
|
{'attachment_id': attachment['id'],
|
||||||
|
'device': '/',
|
||||||
|
'server_id': stubs.FAKE_UUID,
|
||||||
'host_name': None,
|
'host_name': None,
|
||||||
'id': '1',
|
'id': '1',
|
||||||
'volume_id': '1'}],
|
'volume_id': '1'}],
|
||||||
|
'multiattach': 'false',
|
||||||
'bootable': 'false',
|
'bootable': 'false',
|
||||||
'volume_type': None,
|
'volume_type': None,
|
||||||
'snapshot_id': None,
|
'snapshot_id': None,
|
||||||
|
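Note on the v1 hunks above: they all seed an attachment the same way, first creating a volume_attachment row and then flipping it to 'attached'. A minimal sketch of that fixture, assuming the cinder test tree this patch applies to (the instance uuid here is illustrative; the tests use stubs.FAKE_UUID):

    from cinder import context
    from cinder import db

    ctxt = context.get_admin_context()
    # volume_attach creates the volume_attachment row for volume '1'
    attachment = db.volume_attach(ctxt, {'volume_id': '1'})
    # volume_attached marks that row 'attached' and the volume 'in-use';
    # arguments: attachment id, instance uuid, attached host, mountpoint
    db.volume_attached(ctxt, attachment['id'],
                       '12345678-1234-5678-1234-567812345678', None, '/')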
@@ -30,9 +30,6 @@ def stub_volume(id, **kwargs):
 'host': 'fakehost',
 'size': 1,
 'availability_zone': 'fakeaz',
-'instance_uuid': 'fakeuuid',
-'attached_host': None,
-'mountpoint': '/',
 'status': 'fakestatus',
 'migration_status': None,
 'attach_status': 'attached',
@@ -53,7 +50,10 @@ def stub_volume(id, **kwargs):
 'volume_type': {'name': 'vol_type_name'},
 'replication_status': 'disabled',
 'replication_extended_status': None,
-'replication_driver_data': None}
+'replication_driver_data': None,
+'volume_attachment': [],
+'multiattach': False,
+}
 
 volume.update(kwargs)
 if kwargs.get('volume_glance_metadata', None):
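With the stub reworked this way, everything attachment-specific hangs off the volume's 'volume_attachment' list instead of flat columns. A sketch of a stub carrying one attachment, assuming the field names used elsewhere in this patch (the attachment id value is illustrative):

    attachment = {'id': 'a1b2c3d4',          # illustrative attachment id
                  'volume_id': '1',
                  'instance_uuid': 'fakeuuid',
                  'attached_host': None,
                  'mountpoint': '/',
                  'attach_status': 'attached'}
    vol = stub_volume('1', volume_attachment=[attachment])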
@@ -87,12 +87,7 @@ class VolumeApiTest(test.TestCase):
 body = {"volume": vol}
 req = fakes.HTTPRequest.blank('/v2/volumes')
 res_dict = self.controller.create(req, body)
-ex = {'volume': {'attachments':
-[{'device': '/',
-'host_name': None,
-'id': '1',
-'server_id': 'fakeuuid',
-'volume_id': '1'}],
+ex = {'volume': {'attachments': [],
 'availability_zone': 'zone1:host1',
 'bootable': 'false',
 'consistencygroup_id': None,
@@ -107,6 +102,7 @@ class VolumeApiTest(test.TestCase):
 'metadata': {},
 'name': 'Volume Test Name',
 'replication_status': 'disabled',
+'multiattach': False,
 'size': 100,
 'snapshot_id': None,
 'source_volid': None,
@@ -222,11 +218,7 @@ class VolumeApiTest(test.TestCase):
 "description": "Volume Test Desc",
 "availability_zone": "nova",
 "imageRef": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
-ex = {'volume': {'attachments': [{'device': '/',
-'host_name': None,
-'id': '1',
-'server_id': 'fakeuuid',
-'volume_id': '1'}],
+ex = {'volume': {'attachments': [],
 'availability_zone': 'nova',
 'bootable': 'false',
 'consistencygroup_id': None,
@@ -242,6 +234,7 @@ class VolumeApiTest(test.TestCase):
 'metadata': {},
 'name': 'Volume Test Name',
 'replication_status': 'disabled',
+'multiattach': False,
 'size': '1',
 'snapshot_id': None,
 'source_volid': None,
@@ -312,11 +305,7 @@ class VolumeApiTest(test.TestCase):
 "description": "Volume Test Desc",
 "availability_zone": "nova",
 "image_id": 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'}
-ex = {'volume': {'attachments': [{'device': '/',
-'host_name': None,
-'id': '1',
-'server_id': 'fakeuuid',
-'volume_id': '1'}],
+ex = {'volume': {'attachments': [],
 'availability_zone': 'nova',
 'bootable': 'false',
 'consistencygroup_id': None,
@@ -332,6 +321,7 @@ class VolumeApiTest(test.TestCase):
 'metadata': {},
 'name': 'Volume Test Name',
 'replication_status': 'disabled',
+'multiattach': False,
 'size': '1',
 'snapshot_id': None,
 'source_volid': None,
@@ -406,11 +396,7 @@ class VolumeApiTest(test.TestCase):
 "description": "Volume Test Desc",
 "availability_zone": "nova",
 "imageRef": test_id}
-ex = {'volume': {'attachments': [{'device': '/',
-'host_name': None,
-'id': '1',
-'server_id': 'fakeuuid',
-'volume_id': '1'}],
+ex = {'volume': {'attachments': [],
 'availability_zone': 'nova',
 'bootable': 'false',
 'consistencygroup_id': None,
@@ -426,6 +412,7 @@ class VolumeApiTest(test.TestCase):
 'metadata': {},
 'name': 'Volume Test Name',
 'replication_status': 'disabled',
+'multiattach': False,
 'size': '1',
 'snapshot_id': None,
 'source_volid': None,
@@ -500,15 +487,8 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'Updated Test Name',
 'replication_status': 'disabled',
-'attachments': [
-{
-'id': '1',
-'volume_id': '1',
-'server_id': 'fakeuuid',
-'host_name': None,
-'device': '/',
-}
-],
+'multiattach': False,
+'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
 'snapshot_id': None,
@@ -554,15 +534,8 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'Updated Test Name',
 'replication_status': 'disabled',
-'attachments': [
-{
-'id': '1',
-'volume_id': '1',
-'server_id': 'fakeuuid',
-'host_name': None,
-'device': '/',
-}
-],
+'multiattach': False,
+'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
 'snapshot_id': None,
@@ -611,15 +584,8 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'New Name',
 'replication_status': 'disabled',
-'attachments': [
-{
-'id': '1',
-'volume_id': '1',
-'server_id': 'fakeuuid',
-'host_name': None,
-'device': '/',
-}
-],
+'multiattach': False,
+'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
 'snapshot_id': None,
@@ -663,13 +629,8 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'displayname',
 'replication_status': 'disabled',
-'attachments': [{
-'id': '1',
-'volume_id': '1',
-'server_id': 'fakeuuid',
-'host_name': None,
-'device': '/',
-}],
+'multiattach': False,
+'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
 'snapshot_id': None,
@@ -707,6 +668,10 @@ class VolumeApiTest(test.TestCase):
 {"readonly": "True",
 "invisible_key": "invisible_value"},
 False)
+values = {'volume_id': '1', }
+attachment = db.volume_attach(context.get_admin_context(), values)
+db.volume_attached(context.get_admin_context(),
+attachment['id'], stubs.FAKE_UUID, None, '/')
+
 updates = {
 "name": "Updated Test Name",
@@ -718,7 +683,7 @@ class VolumeApiTest(test.TestCase):
 req.environ['cinder.context'] = admin_ctx
 res_dict = self.controller.update(req, '1', body)
 expected = {'volume': {
-'status': 'fakestatus',
+'status': 'in-use',
 'description': 'displaydesc',
 'encrypted': False,
 'availability_zone': 'fakeaz',
@@ -726,10 +691,12 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'Updated Test Name',
 'replication_status': 'disabled',
+'multiattach': False,
 'attachments': [{
 'id': '1',
+'attachment_id': attachment['id'],
 'volume_id': '1',
-'server_id': 'fakeuuid',
+'server_id': stubs.FAKE_UUID,
 'host_name': None,
 'device': '/',
 }],
@@ -753,8 +720,8 @@ class VolumeApiTest(test.TestCase):
 }
 ],
 }}
-self.assertEqual(res_dict, expected)
-self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
+self.assertEqual(expected, res_dict)
+self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
 
 def test_update_empty_body(self):
 body = {}
@@ -831,15 +798,8 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'displayname',
 'replication_status': 'disabled',
-'attachments': [
-{
-'device': '/',
-'server_id': 'fakeuuid',
-'host_name': None,
-'id': '1',
-'volume_id': '1'
-}
-],
+'multiattach': False,
+'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
 'snapshot_id': None,
@@ -877,6 +837,10 @@ class VolumeApiTest(test.TestCase):
 {"readonly": "True",
 "invisible_key": "invisible_value"},
 False)
+values = {'volume_id': '1', }
+attachment = db.volume_attach(context.get_admin_context(), values)
+db.volume_attached(context.get_admin_context(),
+attachment['id'], stubs.FAKE_UUID, None, '/')
+
 req = fakes.HTTPRequest.blank('/v2/volumes/detail')
 admin_ctx = context.RequestContext('admin', 'fakeproject', True)
@@ -885,7 +849,7 @@ class VolumeApiTest(test.TestCase):
 expected = {
 'volumes': [
 {
-'status': 'fakestatus',
+'status': 'in-use',
 'description': 'displaydesc',
 'encrypted': False,
 'availability_zone': 'fakeaz',
@@ -893,10 +857,12 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'displayname',
 'replication_status': 'disabled',
+'multiattach': False,
 'attachments': [
 {
+'attachment_id': attachment['id'],
 'device': '/',
-'server_id': 'fakeuuid',
+'server_id': stubs.FAKE_UUID,
 'host_name': None,
 'id': '1',
 'volume_id': '1'
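The 'attachment_id' surfaced in these expected entries is what a caller hands back at detach time; with several attachments on one volume it selects which one to remove. A sketch of the request body shape, assuming the os-detach action this patch extends (the uuid is illustrative):

    body = {'os-detach':
            {'attachment_id': 'd4e13113-4ed2-4a03-9b28-59e4dc2a6c35'}}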
@@ -1311,15 +1277,8 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'displayname',
 'replication_status': 'disabled',
-'attachments': [
-{
-'device': '/',
-'server_id': 'fakeuuid',
-'host_name': None,
-'id': '1',
-'volume_id': '1'
-}
-],
+'multiattach': False,
+'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
 'snapshot_id': None,
@@ -1362,6 +1321,7 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'displayname',
 'replication_status': 'disabled',
+'multiattach': False,
 'attachments': [],
 'user_id': 'fakeuser',
 'volume_type': 'vol_type_name',
@@ -1406,6 +1366,10 @@ class VolumeApiTest(test.TestCase):
 {"readonly": "True",
 "invisible_key": "invisible_value"},
 False)
+values = {'volume_id': '1', }
+attachment = db.volume_attach(context.get_admin_context(), values)
+db.volume_attached(context.get_admin_context(),
+attachment['id'], stubs.FAKE_UUID, None, '/')
+
 req = fakes.HTTPRequest.blank('/v2/volumes/1')
 admin_ctx = context.RequestContext('admin', 'fakeproject', True)
@@ -1413,7 +1377,7 @@ class VolumeApiTest(test.TestCase):
 res_dict = self.controller.show(req, '1')
 expected = {
 'volume': {
-'status': 'fakestatus',
+'status': 'in-use',
 'description': 'displaydesc',
 'encrypted': False,
 'availability_zone': 'fakeaz',
@@ -1421,10 +1385,12 @@ class VolumeApiTest(test.TestCase):
 'consistencygroup_id': None,
 'name': 'displayname',
 'replication_status': 'disabled',
+'multiattach': False,
 'attachments': [
 {
+'attachment_id': attachment['id'],
 'device': '/',
-'server_id': 'fakeuuid',
+'server_id': stubs.FAKE_UUID,
 'host_name': None,
 'id': '1',
 'volume_id': '1'
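If no attachment_id is supplied, the action code reads it as None and detach falls back to the single-attachment behaviour. A sketch of both call shapes, assuming the volume API signature introduced by this patch:

    # explicit: remove one of several attachments
    self.volume_api.detach(context, volume, attachment_id)
    # legacy: a volume with a single attachment needs no id
    self.volume_api.detach(context, volume, None)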
@@ -105,6 +105,13 @@ class BaseBackupTest(test.TestCase):
 vol['attach_status'] = 'detached'
 return db.volume_create(self.ctxt, vol)['id']
+
+def _create_volume_attach(self, volume_id):
+values = {'volume_id': volume_id,
+'attach_status': 'attached', }
+attachment = db.volume_attach(self.ctxt, values)
+db.volume_attached(self.ctxt, attachment['id'], None, 'testhost',
+'/dev/vd0')
+
 def _create_exported_record_entry(self, vol_size=1):
 """Create backup metadata export entry."""
 vol_id = self._create_volume_db_entry(status='available',
@@ -138,8 +145,12 @@ class BackupTestCase(BaseBackupTest):
 """Make sure stuck volumes and backups are reset to correct
 states when backup_manager.init_host() is called
 """
-vol1_id = self._create_volume_db_entry(status='backing-up')
-vol2_id = self._create_volume_db_entry(status='restoring-backup')
+vol1_id = self._create_volume_db_entry()
+self._create_volume_attach(vol1_id)
+db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
+vol2_id = self._create_volume_db_entry()
+self._create_volume_attach(vol2_id)
+db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
 backup1_id = self._create_backup_db_entry(status='creating')
 backup2_id = self._create_backup_db_entry(status='restoring')
 backup3_id = self._create_backup_db_entry(status='deleting')
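One ordering detail in the backup-test helper: db.volume_attached leaves the volume 'in-use', so the tests create the attachment first and only then push the status the scenario actually needs. A sketch, assuming the helpers defined above:

    vol1_id = self._create_volume_db_entry()
    self._create_volume_attach(vol1_id)           # volume is now 'in-use'
    db.volume_update(self.ctxt, vol1_id,
                     {'status': 'backing-up'})    # override for the scenario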
@@ -232,26 +232,36 @@ class DBAPIVolumeTestCase(BaseTest):
 def test_volume_attached_to_instance(self):
 volume = db.volume_create(self.ctxt, {'host': 'host1'})
 instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-db.volume_attached(self.ctxt, volume['id'],
+values = {'volume_id': volume['id'],
+'instance_uuid': instance_uuid,
+'attach_status': 'attaching', }
+attachment = db.volume_attach(self.ctxt, values)
+db.volume_attached(self.ctxt, attachment['id'],
 instance_uuid, None, '/tmp')
 volume = db.volume_get(self.ctxt, volume['id'])
-self.assertEqual(volume['status'], 'in-use')
-self.assertEqual(volume['mountpoint'], '/tmp')
-self.assertEqual(volume['attach_status'], 'attached')
-self.assertEqual(volume['instance_uuid'], instance_uuid)
-self.assertIsNone(volume['attached_host'])
+attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
+self.assertEqual('in-use', volume['status'])
+self.assertEqual('/tmp', attachment['mountpoint'])
+self.assertEqual('attached', attachment['attach_status'])
+self.assertEqual(instance_uuid, attachment['instance_uuid'])
+self.assertIsNone(attachment['attached_host'])
 
 def test_volume_attached_to_host(self):
 volume = db.volume_create(self.ctxt, {'host': 'host1'})
 host_name = 'fake_host'
-db.volume_attached(self.ctxt, volume['id'],
+values = {'volume_id': volume['id'],
+'attached_host': host_name,
+'attach_status': 'attaching', }
+attachment = db.volume_attach(self.ctxt, values)
+db.volume_attached(self.ctxt, attachment['id'],
 None, host_name, '/tmp')
 volume = db.volume_get(self.ctxt, volume['id'])
-self.assertEqual(volume['status'], 'in-use')
-self.assertEqual(volume['mountpoint'], '/tmp')
-self.assertEqual(volume['attach_status'], 'attached')
-self.assertIsNone(volume['instance_uuid'])
-self.assertEqual(volume['attached_host'], host_name)
+attachment = db.volume_attachment_get(self.ctxt, attachment['id'])
+self.assertEqual('in-use', volume['status'])
+self.assertEqual('/tmp', attachment['mountpoint'])
+self.assertEqual('attached', attachment['attach_status'])
+self.assertIsNone(attachment['instance_uuid'])
+self.assertEqual(attachment['attached_host'], host_name)
 
 def test_volume_data_get_for_host(self):
 for i in xrange(3):
@@ -276,28 +286,38 @@ class DBAPIVolumeTestCase(BaseTest):
 
 def test_volume_detached_from_instance(self):
 volume = db.volume_create(self.ctxt, {})
-db.volume_attached(self.ctxt, volume['id'],
-'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
+instance_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+values = {'volume_id': volume['id'],
+'instance_uuid': instance_uuid,
+'attach_status': 'attaching', }
+attachment = db.volume_attach(self.ctxt, values)
+db.volume_attached(self.ctxt, attachment['id'],
+instance_uuid,
 None, '/tmp')
-db.volume_detached(self.ctxt, volume['id'])
+db.volume_detached(self.ctxt, volume['id'], attachment['id'])
 volume = db.volume_get(self.ctxt, volume['id'])
+self.assertRaises(exception.VolumeAttachmentNotFound,
+db.volume_attachment_get,
+self.ctxt,
+attachment['id'])
 self.assertEqual('available', volume['status'])
-self.assertEqual('detached', volume['attach_status'])
-self.assertIsNone(volume['mountpoint'])
-self.assertIsNone(volume['instance_uuid'])
-self.assertIsNone(volume['attached_host'])
 
 def test_volume_detached_from_host(self):
 volume = db.volume_create(self.ctxt, {})
-db.volume_attached(self.ctxt, volume['id'],
-None, 'fake_host', '/tmp')
-db.volume_detached(self.ctxt, volume['id'])
+host_name = 'fake_host'
+values = {'volume_id': volume['id'],
+'attach_host': host_name,
+'attach_status': 'attaching', }
+attachment = db.volume_attach(self.ctxt, values)
+db.volume_attached(self.ctxt, attachment['id'],
+None, host_name, '/tmp')
+db.volume_detached(self.ctxt, volume['id'], attachment['id'])
 volume = db.volume_get(self.ctxt, volume['id'])
+self.assertRaises(exception.VolumeAttachmentNotFound,
+db.volume_attachment_get,
+self.ctxt,
+attachment['id'])
 self.assertEqual('available', volume['status'])
-self.assertEqual('detached', volume['attach_status'])
-self.assertIsNone(volume['mountpoint'])
-self.assertIsNone(volume['instance_uuid'])
-self.assertIsNone(volume['attached_host'])
 
 def test_volume_get(self):
 volume = db.volume_create(self.ctxt, {})
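Taken together, these DB tests pin down the new attachment lifecycle: attach creates a row, attached activates it, and detach deletes the row outright instead of leaving 'detached' markers on the volume. A condensed sketch under the same assumptions as the tests above:

    from cinder import context
    from cinder import db

    ctxt = context.get_admin_context()
    volume = db.volume_create(ctxt, {'host': 'host1'})
    values = {'volume_id': volume['id'],
              'instance_uuid': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
              'attach_status': 'attaching'}
    attachment = db.volume_attach(ctxt, values)       # row in 'attaching'
    db.volume_attached(ctxt, attachment['id'],
                       values['instance_uuid'], None, '/tmp')  # -> 'in-use'
    db.volume_detached(ctxt, volume['id'], attachment['id'])   # row removed
    # volume_attachment_get(ctxt, attachment['id']) now raises
    # exception.VolumeAttachmentNotFound; the volume is 'available' again.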
@@ -1770,7 +1770,8 @@ class HP3PARBaseDriver(object):
 with mock.patch.object(hpcommon.HP3PARCommon,
 '_create_client') as mock_create_client:
 mock_create_client.return_value = mock_client
-self.driver.detach_volume(context.get_admin_context(), self.volume)
+self.driver.detach_volume(context.get_admin_context(), self.volume,
+None)
 expected = [
 mock.call.removeVolumeMetaData(
 self.VOLUME_3PAR_NAME,
@@ -1784,7 +1785,7 @@ class HP3PARBaseDriver(object):
 self.assertRaises(exception.CinderException,
 self.driver.detach_volume,
 context.get_admin_context(),
-self.volume)
+self.volume, None)
 
 def test_create_snapshot(self):
 # setup_mock_client drive with default configuration
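The HP3PAR changes are mechanical: driver detach_volume grows an attachment argument, and callers that do not track one pass None. A sketch of the adjusted call, assuming the signature exercised above:

    # attachment may be None when the caller has no specific attachment
    self.driver.detach_volume(context.get_admin_context(), self.volume, None)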
@@ -772,6 +772,41 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
 backups = db_utils.get_table(engine, 'backups')
 self.assertNotIn('parent_id', backups.c)
+
+def _check_40(self, engine, data):
+volumes = db_utils.get_table(engine, 'volumes')
+self.assertNotIn('instance_uuid', volumes.c)
+self.assertNotIn('attached_host', volumes.c)
+self.assertNotIn('attach_time', volumes.c)
+self.assertNotIn('mountpoint', volumes.c)
+self.assertIsInstance(volumes.c.multiattach.type,
+self.BOOL_TYPE)
+
+attachments = db_utils.get_table(engine, 'volume_attachment')
+self.assertIsInstance(attachments.c.attach_mode.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(attachments.c.instance_uuid.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(attachments.c.attached_host.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(attachments.c.mountpoint.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(attachments.c.attach_status.type,
+sqlalchemy.types.VARCHAR)
+
+def _post_downgrade_040(self, engine):
+self.assertFalse(engine.dialect.has_table(engine.connect(),
+"volume_attachment"))
+volumes = db_utils.get_table(engine, 'volumes')
+self.assertNotIn('multiattach', volumes.c)
+self.assertIsInstance(volumes.c.instance_uuid.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(volumes.c.attached_host.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(volumes.c.attach_time.type,
+sqlalchemy.types.VARCHAR)
+self.assertIsInstance(volumes.c.mountpoint.type,
+sqlalchemy.types.VARCHAR)
+
 def test_walk_versions(self):
 self.walk_versions(True, False)
 
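After migration 40, per-attachment data can be read straight from the new table with SQLAlchemy. A sketch against the same engine/db_utils pair the migration tests use (0.x-style select, matching the era of this test module):

    import sqlalchemy

    attachments = db_utils.get_table(engine, 'volume_attachment')
    rows = engine.execute(
        sqlalchemy.select([attachments.c.volume_id,
                           attachments.c.instance_uuid,
                           attachments.c.attached_host,
                           attachments.c.mountpoint])).fetchall()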
@@ -430,7 +430,6 @@ class VolumeTestCase(BaseVolumeTestCase):
 'volume_id': volume_id,
 'volume_type': None,
 'snapshot_id': None,
-'instance_uuid': None,
 'user_id': 'fake',
 'launched_at': 'DONTCARE',
 'size': 1,
@@ -1748,14 +1747,15 @@ class VolumeTestCase(BaseVolumeTestCase):
 **self.volume_params)
 volume_id = volume['id']
 self.volume.create_volume(self.context, volume_id)
-self.volume.attach_volume(self.context, volume_id, instance_uuid,
-None, mountpoint, 'ro')
+attachment = self.volume.attach_volume(self.context, volume_id,
+instance_uuid, None,
+mountpoint, 'ro')
 vol = db.volume_get(context.get_admin_context(), volume_id)
 self.assertEqual(vol['status'], "in-use")
-self.assertEqual(vol['attach_status'], "attached")
-self.assertEqual(vol['mountpoint'], mountpoint)
-self.assertEqual(vol['instance_uuid'], instance_uuid)
-self.assertIsNone(vol['attached_host'])
+self.assertEqual('attached', attachment['attach_status'])
+self.assertEqual(mountpoint, attachment['mountpoint'])
+self.assertEqual(instance_uuid, attachment['instance_uuid'])
+self.assertIsNone(attachment['attached_host'])
 admin_metadata = vol['volume_admin_metadata']
 self.assertEqual(len(admin_metadata), 2)
 expected = dict(readonly='True', attached_mode='ro')
@@ -1767,15 +1767,220 @@ class VolumeTestCase(BaseVolumeTestCase):
 connector = {'initiator': 'iqn.2012-07.org.fake:01'}
 conn_info = self.volume.initialize_connection(self.context,
 volume_id, connector)
-self.assertEqual(conn_info['data']['access_mode'], 'ro')
+self.assertEqual('ro', conn_info['data']['access_mode'])
 
 self.assertRaises(exception.VolumeAttached,
 self.volume.delete_volume,
 self.context,
 volume_id)
+self.volume.detach_volume(self.context, volume_id, attachment['id'])
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('available', vol['status'])
+
+self.volume.delete_volume(self.context, volume_id)
+self.assertRaises(exception.VolumeNotFound,
+db.volume_get,
+self.context,
+volume_id)
+
+def test_detach_invalid_attachment_id(self):
+"""Make sure if the attachment id isn't found we raise."""
+attachment_id = "notfoundid"
+volume_id = "abc123"
+self.assertRaises(exception.VolumeAttachmentNotFound,
+self.volume.detach_volume,
+self.context,
+volume_id,
+attachment_id)
+
+def test_run_attach_detach_volume_for_instance_no_attachment_id(self):
+"""Make sure volume can be attached and detached from instance."""
+mountpoint = "/dev/sdf"
+# attach volume to the instance then to detach
+instance_uuid = '12345678-1234-5678-1234-567812345678'
+instance_uuid_2 = '12345678-4321-8765-4321-567812345678'
+volume = tests_utils.create_volume(self.context,
+admin_metadata={'readonly': 'True'},
+multiattach=True,
+**self.volume_params)
+volume_id = volume['id']
+self.volume.create_volume(self.context, volume_id)
+attachment = self.volume.attach_volume(self.context, volume_id,
+instance_uuid, None,
+mountpoint, 'ro')
+vol = db.volume_get(context.get_admin_context(), volume_id)
+self.assertEqual('in-use', vol['status'])
+self.assertEqual('attached', attachment['attach_status'])
+self.assertEqual(mountpoint, attachment['mountpoint'])
+self.assertEqual(instance_uuid, attachment['instance_uuid'])
+self.assertIsNone(attachment['attached_host'])
+admin_metadata = vol['volume_admin_metadata']
+self.assertEqual(2, len(admin_metadata))
+expected = dict(readonly='True', attached_mode='ro')
+ret = {}
+for item in admin_metadata:
+ret.update({item['key']: item['value']})
+self.assertDictMatch(ret, expected)
+attachment2 = self.volume.attach_volume(self.context, volume_id,
+instance_uuid_2, None,
+mountpoint, 'ro')
+
+connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+conn_info = self.volume.initialize_connection(self.context,
+volume_id, connector)
+self.assertEqual('ro', conn_info['data']['access_mode'])
+self.assertRaises(exception.VolumeAttached,
+self.volume.delete_volume,
+self.context,
+volume_id)
+
+self.assertRaises(exception.InvalidVolume,
+self.volume.detach_volume,
+self.context, volume_id)
+
+self.volume.detach_volume(self.context, volume_id, attachment['id'])
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('in-use', vol['status'])
+
+self.volume.detach_volume(self.context, volume_id, attachment2['id'])
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('available', vol['status'])
+
+attachment = self.volume.attach_volume(self.context, volume_id,
+instance_uuid, None,
+mountpoint, 'ro')
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('in-use', vol['status'])
 self.volume.detach_volume(self.context, volume_id)
 vol = db.volume_get(self.context, volume_id)
-self.assertEqual(vol['status'], "available")
+self.assertEqual('available', vol['status'])
+
+self.volume.delete_volume(self.context, volume_id)
+self.assertRaises(exception.VolumeNotFound,
+db.volume_get,
+self.context,
+volume_id)
+
+def test_run_attach_detach_multiattach_volume_for_instances(self):
+"""Make sure volume can be attached to multiple instances."""
+mountpoint = "/dev/sdf"
+# attach volume to the instance then to detach
+instance_uuid = '12345678-1234-5678-1234-567812345678'
+volume = tests_utils.create_volume(self.context,
+admin_metadata={'readonly': 'True'},
+multiattach=True,
+**self.volume_params)
+volume_id = volume['id']
+self.volume.create_volume(self.context, volume_id)
+attachment = self.volume.attach_volume(self.context, volume_id,
+instance_uuid, None,
+mountpoint, 'ro')
+vol = db.volume_get(context.get_admin_context(), volume_id)
+self.assertEqual('in-use', vol['status'])
+self.assertEqual(True, vol['multiattach'])
+self.assertEqual('attached', attachment['attach_status'])
+self.assertEqual(mountpoint, attachment['mountpoint'])
+self.assertEqual(instance_uuid, attachment['instance_uuid'])
+self.assertIsNone(attachment['attached_host'])
+admin_metadata = vol['volume_admin_metadata']
+self.assertEqual(2, len(admin_metadata))
+expected = dict(readonly='True', attached_mode='ro')
+ret = {}
+for item in admin_metadata:
+ret.update({item['key']: item['value']})
+self.assertDictMatch(ret, expected)
+connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+conn_info = self.volume.initialize_connection(self.context,
+volume_id, connector)
+self.assertEqual('ro', conn_info['data']['access_mode'])
+
+instance2_uuid = '12345678-1234-5678-1234-567812345000'
+mountpoint2 = "/dev/sdx"
+attachment2 = self.volume.attach_volume(self.context, volume_id,
+instance2_uuid, None,
+mountpoint2, 'ro')
+vol = db.volume_get(context.get_admin_context(), volume_id)
+self.assertEqual('in-use', vol['status'])
+self.assertEqual(True, vol['multiattach'])
+self.assertEqual('attached', attachment2['attach_status'])
+self.assertEqual(mountpoint2, attachment2['mountpoint'])
+self.assertEqual(instance2_uuid, attachment2['instance_uuid'])
+self.assertIsNone(attachment2['attached_host'])
+self.assertNotEqual(attachment, attachment2)
+
+self.assertRaises(exception.VolumeAttached,
+self.volume.delete_volume,
+self.context,
+volume_id)
+self.volume.detach_volume(self.context, volume_id, attachment['id'])
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('in-use', vol['status'])
+
+self.assertRaises(exception.VolumeAttached,
+self.volume.delete_volume,
+self.context,
+volume_id)
+
+self.volume.detach_volume(self.context, volume_id, attachment2['id'])
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('available', vol['status'])
+
+self.volume.delete_volume(self.context, volume_id)
+self.assertRaises(exception.VolumeNotFound,
+db.volume_get,
+self.context,
+volume_id)
+
+def test_attach_detach_not_multiattach_volume_for_instances(self):
+"""Make sure volume can't be attached to more than one instance."""
+mountpoint = "/dev/sdf"
+# attach volume to the instance then to detach
+instance_uuid = '12345678-1234-5678-1234-567812345678'
+volume = tests_utils.create_volume(self.context,
+admin_metadata={'readonly': 'True'},
+multiattach=False,
+**self.volume_params)
+volume_id = volume['id']
+self.volume.create_volume(self.context, volume_id)
+attachment = self.volume.attach_volume(self.context, volume_id,
+instance_uuid, None,
+mountpoint, 'ro')
+vol = db.volume_get(context.get_admin_context(), volume_id)
+self.assertEqual('in-use', vol['status'])
+self.assertEqual(False, vol['multiattach'])
+self.assertEqual('attached', attachment['attach_status'])
+self.assertEqual(mountpoint, attachment['mountpoint'])
+self.assertEqual(instance_uuid, attachment['instance_uuid'])
+self.assertIsNone(attachment['attached_host'])
+admin_metadata = vol['volume_admin_metadata']
+self.assertEqual(2, len(admin_metadata))
+expected = dict(readonly='True', attached_mode='ro')
+ret = {}
+for item in admin_metadata:
+ret.update({item['key']: item['value']})
+self.assertDictMatch(ret, expected)
+connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+conn_info = self.volume.initialize_connection(self.context,
+volume_id, connector)
+self.assertEqual('ro', conn_info['data']['access_mode'])
+
+instance2_uuid = '12345678-1234-5678-1234-567812345000'
+mountpoint2 = "/dev/sdx"
+self.assertRaises(exception.InvalidVolume,
+self.volume.attach_volume,
+self.context,
+volume_id,
+instance2_uuid,
+None,
+mountpoint2, 'ro')
+
+self.assertRaises(exception.VolumeAttached,
+self.volume.delete_volume,
+self.context,
+volume_id)
+self.volume.detach_volume(self.context, volume_id, attachment['id'])
+vol = db.volume_get(self.context, volume_id)
+self.assertEqual('available', vol['status'])
+
 self.volume.delete_volume(self.context, volume_id)
 self.assertRaises(exception.VolumeNotFound,
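The manager-level rule these tests establish: detach_volume without an attachment_id only works while the target is unambiguous. With two live attachments it raises InvalidVolume; once a single attachment remains, the id can be omitted again. A sketch of the failure path, assuming the manager API above:

    # two attachments exist here, so the target is ambiguous
    self.assertRaises(exception.InvalidVolume,
                      self.volume.detach_volume,
                      self.context, volume_id)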
@ -1792,17 +1997,17 @@ class VolumeTestCase(BaseVolumeTestCase):
|
|||||||
**self.volume_params)
|
**self.volume_params)
|
||||||
volume_id = volume['id']
|
volume_id = volume['id']
|
||||||
self.volume.create_volume(self.context, volume_id)
|
self.volume.create_volume(self.context, volume_id)
|
||||||
self.volume.attach_volume(self.context, volume_id, None,
|
attachment = self.volume.attach_volume(self.context, volume_id, None,
|
||||||
'fake_host', mountpoint, 'rw')
|
'fake_host', mountpoint, 'rw')
|
||||||
vol = db.volume_get(context.get_admin_context(), volume_id)
|
vol = db.volume_get(context.get_admin_context(), volume_id)
|
||||||
self.assertEqual(vol['status'], "in-use")
|
self.assertEqual('in-use', vol['status'])
|
||||||
self.assertEqual(vol['attach_status'], "attached")
|
self.assertEqual('attached', attachment['attach_status'])
|
||||||
self.assertEqual(vol['mountpoint'], mountpoint)
|
self.assertEqual(mountpoint, attachment['mountpoint'])
|
||||||
self.assertIsNone(vol['instance_uuid'])
|
self.assertIsNone(attachment['instance_uuid'])
|
||||||
# sanitized, conforms to RFC-952 and RFC-1123 specs.
|
# sanitized, conforms to RFC-952 and RFC-1123 specs.
|
||||||
self.assertEqual(vol['attached_host'], 'fake-host')
|
self.assertEqual(attachment['attached_host'], 'fake-host')
|
||||||
admin_metadata = vol['volume_admin_metadata']
|
admin_metadata = vol['volume_admin_metadata']
|
||||||
self.assertEqual(len(admin_metadata), 2)
|
self.assertEqual(2, len(admin_metadata))
|
||||||
expected = dict(readonly='False', attached_mode='rw')
|
expected = dict(readonly='False', attached_mode='rw')
|
||||||
ret = {}
|
ret = {}
|
||||||
for item in admin_metadata:
|
for item in admin_metadata:
|
||||||
@ -1812,13 +2017,13 @@ class VolumeTestCase(BaseVolumeTestCase):
|
|||||||
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
|
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
|
||||||
conn_info = self.volume.initialize_connection(self.context,
|
conn_info = self.volume.initialize_connection(self.context,
|
||||||
volume_id, connector)
|
volume_id, connector)
|
||||||
self.assertEqual(conn_info['data']['access_mode'], 'rw')
|
self.assertEqual('rw', conn_info['data']['access_mode'])
|
||||||
|
|
||||||
self.assertRaises(exception.VolumeAttached,
|
self.assertRaises(exception.VolumeAttached,
|
||||||
self.volume.delete_volume,
|
self.volume.delete_volume,
|
||||||
self.context,
|
self.context,
|
||||||
volume_id)
|
volume_id)
|
||||||
self.volume.detach_volume(self.context, volume_id)
|
self.volume.detach_volume(self.context, volume_id, attachment['id'])
|
||||||
vol = db.volume_get(self.context, volume_id)
|
vol = db.volume_get(self.context, volume_id)
|
||||||
self.assertEqual(vol['status'], "available")
|
self.assertEqual(vol['status'], "available")
|
||||||
|
|
||||||
@ -1828,6 +2033,131 @@ class VolumeTestCase(BaseVolumeTestCase):
|
|||||||
self.context,
|
self.context,
|
||||||
volume_id)
|
volume_id)
|
||||||
|
|
||||||
|
def test_run_attach_detach_multiattach_volume_for_hosts(self):
|
||||||
|
"""Make sure volume can be attached and detached from hosts."""
|
||||||
|
mountpoint = "/dev/sdf"
|
||||||
|
volume = tests_utils.create_volume(
|
||||||
|
self.context,
|
||||||
|
admin_metadata={'readonly': 'False'},
|
||||||
|
multiattach=True,
|
||||||
|
**self.volume_params)
|
||||||
|
volume_id = volume['id']
|
||||||
|
self.volume.create_volume(self.context, volume_id)
|
||||||
|
attachment = self.volume.attach_volume(self.context, volume_id, None,
|
||||||
|
'fake_host', mountpoint, 'rw')
|
||||||
|
vol = db.volume_get(context.get_admin_context(), volume_id)
|
||||||
|
self.assertEqual('in-use', vol['status'])
|
||||||
|
self.assertEqual(True, vol['multiattach'])
|
||||||
|
self.assertEqual('attached', attachment['attach_status'])
|
||||||
|
self.assertEqual(mountpoint, attachment['mountpoint'])
|
||||||
|
self.assertIsNone(attachment['instance_uuid'])
|
||||||
|
# sanitized, conforms to RFC-952 and RFC-1123 specs.
|
||||||
|
self.assertEqual(attachment['attached_host'], 'fake-host')
|
||||||
|
admin_metadata = vol['volume_admin_metadata']
|
||||||
|
self.assertEqual(2, len(admin_metadata))
|
||||||
|
expected = dict(readonly='False', attached_mode='rw')
|
||||||
|
ret = {}
|
||||||
|
for item in admin_metadata:
|
||||||
|
ret.update({item['key']: item['value']})
|
||||||
|
self.assertDictMatch(ret, expected)
|
||||||
|
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
|
||||||
|
conn_info = self.volume.initialize_connection(self.context,
|
||||||
|
volume_id, connector)
|
||||||
|
self.assertEqual(conn_info['data']['access_mode'], 'rw')
|
||||||
|
|
||||||
|
mountpoint2 = "/dev/sdx"
|
||||||
|
attachment2 = self.volume.attach_volume(self.context, volume_id, None,
|
||||||
|
'fake_host2', mountpoint2,
|
||||||
|
'rw')
|
||||||
|
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertEqual('attached', attachment2['attach_status'])
+        self.assertEqual(mountpoint2, attachment2['mountpoint'])
+        self.assertIsNone(attachment2['instance_uuid'])
+        # sanitized, conforms to RFC-952 and RFC-1123 specs.
+        self.assertEqual('fake-host2', attachment2['attached_host'])
+
+        self.assertRaises(exception.VolumeAttached,
+                          self.volume.delete_volume,
+                          self.context,
+                          volume_id)
+        self.volume.detach_volume(self.context, volume_id, attachment['id'])
+        vol = db.volume_get(self.context, volume_id)
+        self.assertEqual(vol['status'], "in-use")
+
+        self.volume.detach_volume(self.context, volume_id, attachment2['id'])
+        vol = db.volume_get(self.context, volume_id)
+        self.assertEqual(vol['status'], "available")
+
+        self.volume.delete_volume(self.context, volume_id)
+        self.assertRaises(exception.VolumeNotFound,
+                          db.volume_get,
+                          self.context,
+                          volume_id)
+
+    def test_run_attach_detach_not_multiattach_volume_for_hosts(self):
+        """Make sure volume can't be attached to more than one host."""
+        mountpoint = "/dev/sdf"
+        volume = tests_utils.create_volume(
+            self.context,
+            admin_metadata={'readonly': 'False'},
+            multiattach=False,
+            **self.volume_params)
+        volume_id = volume['id']
+        self.volume.create_volume(self.context, volume_id)
+        attachment = self.volume.attach_volume(self.context, volume_id, None,
+                                               'fake_host', mountpoint, 'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertEqual(False, vol['multiattach'])
+        self.assertEqual('attached', attachment['attach_status'])
+        self.assertEqual(mountpoint, attachment['mountpoint'])
+        self.assertIsNone(attachment['instance_uuid'])
+        # sanitized, conforms to RFC-952 and RFC-1123 specs.
+        self.assertEqual(attachment['attached_host'], 'fake-host')
+        admin_metadata = vol['volume_admin_metadata']
+        self.assertEqual(2, len(admin_metadata))
+        expected = dict(readonly='False', attached_mode='rw')
+        ret = {}
+        for item in admin_metadata:
+            ret.update({item['key']: item['value']})
+        self.assertDictMatch(ret, expected)
+        connector = {'initiator': 'iqn.2012-07.org.fake:01'}
+        conn_info = self.volume.initialize_connection(self.context,
+                                                      volume_id, connector)
+        self.assertEqual('rw', conn_info['data']['access_mode'])
+
+        mountpoint2 = "/dev/sdx"
+        self.assertRaises(exception.InvalidVolume,
+                          self.volume.attach_volume,
+                          self.context,
+                          volume_id,
+                          None,
+                          'fake_host2',
+                          mountpoint2,
+                          'rw')
+        vol = db.volume_get(context.get_admin_context(), volume_id)
+        self.assertEqual('in-use', vol['status'])
+        self.assertEqual('attached', attachment['attach_status'])
+        self.assertEqual(mountpoint, attachment['mountpoint'])
+        self.assertIsNone(attachment['instance_uuid'])
+        # sanitized, conforms to RFC-952 and RFC-1123 specs.
+        self.assertEqual('fake-host', attachment['attached_host'])
+
+        self.assertRaises(exception.VolumeAttached,
+                          self.volume.delete_volume,
+                          self.context,
+                          volume_id)
+        self.volume.detach_volume(self.context, volume_id, attachment['id'])
+        vol = db.volume_get(self.context, volume_id)
+        self.assertEqual('available', vol['status'])
+
+        self.volume.delete_volume(self.context, volume_id)
+        self.assertRaises(exception.VolumeNotFound,
+                          db.volume_get,
+                          self.context,
+                          volume_id)
+
     def test_run_attach_detach_volume_with_attach_mode(self):
         instance_uuid = '12345678-1234-5678-1234-567812345678'
         mountpoint = "/dev/sdf"
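Reviewer note (not part of the patch): the two tests above pin down the new per-attachment bookkeeping. A condensed sketch of the flow they assert, for a volume created with multiattach=True (volume_mgr stands in for self.volume; the names follow the tests):

    # attach_volume() now returns the attachment record, so each caller
    # keeps its own handle instead of sharing volume-level attach columns.
    att1 = volume_mgr.attach_volume(ctx, vol_id, None, 'fake_host',
                                    '/dev/sdf', 'rw')
    att2 = volume_mgr.attach_volume(ctx, vol_id, None, 'fake_host2',
                                    '/dev/sdx', 'rw')
    volume_mgr.detach_volume(ctx, vol_id, att1['id'])  # still 'in-use'
    volume_mgr.detach_volume(ctx, vol_id, att2['id'])  # now 'available'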
@@ -1835,21 +2165,18 @@ class VolumeTestCase(BaseVolumeTestCase):
             admin_metadata={'readonly': 'True'},
             **self.volume_params)
         volume_id = volume['id']
-        db.volume_update(self.context, volume_id, {'status': 'available',
-                                                   'mountpoint': None,
-                                                   'instance_uuid': None,
-                                                   'attached_host': None,
-                                                   'attached_mode': None})
+        db.volume_update(self.context, volume_id, {'status': 'available', })
         self.volume.attach_volume(self.context, volume_id, instance_uuid,
                                   None, mountpoint, 'ro')
         vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual(vol['status'], "in-use")
-        self.assertEqual(vol['attach_status'], "attached")
-        self.assertEqual(vol['mountpoint'], mountpoint)
-        self.assertEqual(vol['instance_uuid'], instance_uuid)
-        self.assertIsNone(vol['attached_host'])
+        attachment = vol['volume_attachment'][0]
+        self.assertEqual('in-use', vol['status'])
+        self.assertEqual('attached', vol['attach_status'])
+        self.assertEqual(mountpoint, attachment['mountpoint'])
+        self.assertEqual(instance_uuid, attachment['instance_uuid'])
+        self.assertIsNone(attachment['attached_host'])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(2, len(admin_metadata))
         expected = dict(readonly='True', attached_mode='ro')
         ret = {}
         for item in admin_metadata:
@@ -1859,30 +2186,30 @@ class VolumeTestCase(BaseVolumeTestCase):
         conn_info = self.volume.initialize_connection(self.context,
                                                       volume_id, connector)

-        self.assertEqual(conn_info['data']['access_mode'], 'ro')
+        self.assertEqual('ro', conn_info['data']['access_mode'])

-        self.volume.detach_volume(self.context, volume_id)
+        self.volume.detach_volume(self.context, volume_id, attachment['id'])
         vol = db.volume_get(self.context, volume_id)
-        self.assertEqual(vol['status'], "available")
-        self.assertEqual(vol['attach_status'], "detached")
-        self.assertIsNone(vol['mountpoint'])
-        self.assertIsNone(vol['instance_uuid'])
-        self.assertIsNone(vol['attached_host'])
+        attachment = vol['volume_attachment']
+        self.assertEqual('available', vol['status'])
+        self.assertEqual('detached', vol['attach_status'])
+        self.assertEqual(attachment, [])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 1)
-        self.assertEqual(admin_metadata[0]['key'], 'readonly')
-        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(1, len(admin_metadata))
+        self.assertEqual('readonly', admin_metadata[0]['key'])
+        self.assertEqual('True', admin_metadata[0]['value'])

         self.volume.attach_volume(self.context, volume_id, None,
                                   'fake_host', mountpoint, 'ro')
         vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual(vol['status'], "in-use")
-        self.assertEqual(vol['attach_status'], "attached")
-        self.assertEqual(vol['mountpoint'], mountpoint)
-        self.assertIsNone(vol['instance_uuid'])
-        self.assertEqual(vol['attached_host'], 'fake-host')
+        attachment = vol['volume_attachment'][0]
+        self.assertEqual('in-use', vol['status'])
+        self.assertEqual('attached', vol['attach_status'])
+        self.assertEqual(mountpoint, attachment['mountpoint'])
+        self.assertIsNone(attachment['instance_uuid'])
+        self.assertEqual('fake-host', attachment['attached_host'])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(2, len(admin_metadata))
         expected = dict(readonly='True', attached_mode='ro')
         ret = {}
         for item in admin_metadata:
@@ -1891,19 +2218,19 @@ class VolumeTestCase(BaseVolumeTestCase):
         connector = {'initiator': 'iqn.2012-07.org.fake:01'}
         conn_info = self.volume.initialize_connection(self.context,
                                                       volume_id, connector)
-        self.assertEqual(conn_info['data']['access_mode'], 'ro')
+        self.assertEqual('ro', conn_info['data']['access_mode'])

-        self.volume.detach_volume(self.context, volume_id)
+        self.volume.detach_volume(self.context, volume_id,
+                                  attachment['id'])
         vol = db.volume_get(self.context, volume_id)
-        self.assertEqual(vol['status'], "available")
-        self.assertEqual(vol['attach_status'], "detached")
-        self.assertIsNone(vol['mountpoint'])
-        self.assertIsNone(vol['instance_uuid'])
-        self.assertIsNone(vol['attached_host'])
+        attachment = vol['volume_attachment']
+        self.assertEqual('available', vol['status'])
+        self.assertEqual('detached', vol['attach_status'])
+        self.assertEqual(attachment, [])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 1)
-        self.assertEqual(admin_metadata[0]['key'], 'readonly')
-        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(1, len(admin_metadata))
+        self.assertEqual('readonly', admin_metadata[0]['key'])
+        self.assertEqual('True', admin_metadata[0]['value'])

         self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.VolumeNotFound,
@@ -1929,10 +2256,10 @@ class VolumeTestCase(BaseVolumeTestCase):
                           mountpoint,
                           'rw')
         vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual(vol['status'], "error_attaching")
-        self.assertEqual(vol['attach_status'], "detached")
+        self.assertEqual('error_attaching', vol['status'])
+        self.assertEqual('detached', vol['attach_status'])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(2, len(admin_metadata))
         expected = dict(readonly='True', attached_mode='rw')
         ret = {}
         for item in admin_metadata:
@@ -1949,10 +2276,10 @@ class VolumeTestCase(BaseVolumeTestCase):
                           mountpoint,
                           'rw')
         vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual(vol['status'], "error_attaching")
-        self.assertEqual(vol['attach_status'], "detached")
+        self.assertEqual('error_attaching', vol['status'])
+        self.assertEqual('detached', vol['attach_status'])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 2)
+        self.assertEqual(2, len(admin_metadata))
         expected = dict(readonly='True', attached_mode='rw')
         ret = {}
         for item in admin_metadata:
@@ -1978,11 +2305,11 @@ class VolumeTestCase(BaseVolumeTestCase):
                           mountpoint,
                           'rw')
         vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual(vol['attach_status'], "detached")
+        self.assertEqual('detached', vol['attach_status'])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 1)
-        self.assertEqual(admin_metadata[0]['key'], 'readonly')
-        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(1, len(admin_metadata))
+        self.assertEqual('readonly', admin_metadata[0]['key'])
+        self.assertEqual('True', admin_metadata[0]['value'])

         db.volume_update(self.context, volume_id, {'status': 'available'})
         self.assertRaises(exception.InvalidVolumeAttachMode,
@@ -1994,11 +2321,11 @@ class VolumeTestCase(BaseVolumeTestCase):
                           mountpoint,
                           'rw')
         vol = db.volume_get(context.get_admin_context(), volume_id)
-        self.assertEqual(vol['attach_status'], "detached")
+        self.assertEqual('detached', vol['attach_status'])
         admin_metadata = vol['volume_admin_metadata']
-        self.assertEqual(len(admin_metadata), 1)
-        self.assertEqual(admin_metadata[0]['key'], 'readonly')
-        self.assertEqual(admin_metadata[0]['value'], 'True')
+        self.assertEqual(1, len(admin_metadata))
+        self.assertEqual('readonly', admin_metadata[0]['key'])
+        self.assertEqual('True', admin_metadata[0]['value'])

     @mock.patch.object(cinder.volume.api.API, 'update')
     @mock.patch.object(db, 'volume_get')
@@ -2022,7 +2349,7 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_reserve_volume_bad_status(self):
         fake_volume = {
             'id': FAKE_UUID,
-            'status': 'in-use'
+            'status': 'attaching'
         }

         with mock.patch.object(db, 'volume_get') as mock_volume_get:
@@ -2033,20 +2360,31 @@ class VolumeTestCase(BaseVolumeTestCase):
                           fake_volume)
         self.assertTrue(mock_volume_get.called)

-    def test_unreserve_volume_success(self):
+    @mock.patch.object(db, 'volume_get')
+    @mock.patch.object(db, 'volume_attachment_get_used_by_volume_id')
+    @mock.patch.object(cinder.volume.api.API, 'update')
+    def test_unreserve_volume_success(self, volume_get,
+                                      volume_attachment_get_used_by_volume_id,
+                                      volume_update):
         fake_volume = {
             'id': FAKE_UUID,
             'status': 'attaching'
         }
+        fake_attachments = [{'volume_id': FAKE_UUID,
+                             'instance_uuid': 'fake_instance_uuid'}]

-        with mock.patch.object(cinder.volume.api.API,
-                               'update') as mock_volume_update:
-            mock_volume_update.return_value = fake_volume
-            self.assertIsNone(cinder.volume.api.API().unreserve_volume(
-                self.context,
-                fake_volume
-            ))
-            self.assertTrue(mock_volume_update.called)
+        volume_get.return_value = fake_volume
+        volume_attachment_get_used_by_volume_id.return_value = fake_attachments
+        volume_update.return_value = fake_volume
+
+        self.assertIsNone(cinder.volume.api.API().unreserve_volume(
+            self.context,
+            fake_volume
+        ))
+
+        self.assertTrue(volume_get.called)
+        self.assertTrue(volume_attachment_get_used_by_volume_id.called)
+        self.assertTrue(volume_update.called)

     def test_concurrent_volumes_get_different_targets(self):
         """Ensure multiple concurrent volumes get different targets."""
@@ -2350,7 +2688,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         # create volume and attach to the instance
         volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
-        db.volume_attached(self.context, volume['id'], instance_uuid,
+        values = {'volume_id': volume['id'],
+                  'instance_uuid': instance_uuid,
+                  'attach_status': 'attaching', }
+        attachment = db.volume_attach(self.context, values)
+        db.volume_attached(self.context, attachment['id'], instance_uuid,
                            None, '/dev/sda1')

         volume_api = cinder.volume.api.API()
@@ -2369,7 +2711,11 @@ class VolumeTestCase(BaseVolumeTestCase):
         # create volume and attach to the host
         volume = tests_utils.create_volume(self.context, **self.volume_params)
         self.volume.create_volume(self.context, volume['id'])
-        db.volume_attached(self.context, volume['id'], None,
+        values = {'volume_id': volume['id'],
+                  'attached_host': 'fake_host',
+                  'attach_status': 'attaching', }
+        attachment = db.volume_attach(self.context, values)
+        db.volume_attached(self.context, attachment['id'], None,
                            'fake_host', '/dev/sda1')

         volume_api = cinder.volume.api.API()
@@ -2827,8 +3173,11 @@ class VolumeTestCase(BaseVolumeTestCase):

         instance_uuid = '12345678-1234-5678-1234-567812345678'
         volume = tests_utils.create_volume(self.context, **self.volume_params)
+        attachment = db.volume_attach(self.context,
+                                      {'volume_id': volume['id'],
+                                       'attached_host': 'fake-host'})
         volume = db.volume_attached(
-            self.context, volume['id'], instance_uuid, 'fake-host', 'vdb')
+            self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb')
         volume_api = cinder.volume.api.API()
         volume_api.begin_detaching(self.context, volume)
         volume = db.volume_get(self.context, volume['id'])
@@ -3358,9 +3707,12 @@ class VolumeTestCase(BaseVolumeTestCase):
         old_volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host,
                                                status=initial_status,
-                                               migration_status='migrating',
-                                               instance_uuid=instance_uuid,
-                                               attached_host=attached_host)
+                                               migration_status='migrating')
+        if status == 'in-use':
+            vol = tests_utils.attach_volume(self.context, old_volume['id'],
+                                            instance_uuid, attached_host,
+                                            '/dev/vda')
+            self.assertEqual(vol['status'], 'in-use')
         target_status = 'target:%s' % old_volume['id']
         new_volume = tests_utils.create_volume(self.context, size=0,
                                                host=CONF.host,
@@ -3381,15 +3733,16 @@ class VolumeTestCase(BaseVolumeTestCase):
         self.stubs.Set(self.volume.driver, 'attach_volume',
                        lambda *args, **kwargs: None)

-        with mock.patch.object(self.volume.driver, 'detach_volume') as detach:
+        with mock.patch.object(self.volume.driver, 'detach_volume'):
             self.volume.migrate_volume_completion(self.context, old_volume[
                 'id'], new_volume['id'])

-        volume = db.volume_get(elevated, old_volume['id'])
-        self.assertEqual(volume['status'], status)
-        self.assertEqual(volume['attached_host'], attached_host)
-        self.assertEqual(volume['instance_uuid'], instance_uuid)
-        self.assertEqual(status == 'in-use', detach.called)
+        if status == 'in-use':
+            attachment = db.volume_attachment_get_by_instance_uuid(
+                self.context, old_volume['id'], instance_uuid)
+            self.assertIsNotNone(attachment)
+            self.assertEqual(attachment['attached_host'], attached_host)
+            self.assertEqual(attachment['instance_uuid'], instance_uuid)

     def test_migrate_volume_completion_retype_available(self):
         self._test_migrate_volume_completion('available', retyping=True)
@@ -4072,8 +4425,6 @@ class CopyVolumeToImageTestCase(BaseVolumeTestCase):
     def test_copy_volume_to_image_status_use(self):
        self.image_meta['id'] = 'a440c04b-79fa-479c-bed1-0b816eaec379'
         # creating volume testdata
-        self.volume_attrs['instance_uuid'] = 'b21f957d-a72f-4b93-b5a5-' \
-                                             '45b1161abb02'
         db.volume_create(self.context, self.volume_attrs)

         # start test
@@ -4082,7 +4433,7 @@ class CopyVolumeToImageTestCase(BaseVolumeTestCase):
                                         self.image_meta)

         volume = db.volume_get(self.context, self.volume_id)
-        self.assertEqual(volume['status'], 'in-use')
+        self.assertEqual(volume['status'], 'available')

     def test_copy_volume_to_image_exception(self):
         self.image_meta['id'] = FAKE_UUID
@@ -221,7 +221,9 @@ class VolumeRpcAPITestCase(test.TestCase):
     def test_detach_volume(self):
         self._test_volume_api('detach_volume',
                               rpc_method='call',
-                              volume=self.fake_volume)
+                              volume=self.fake_volume,
+                              attachment_id='fake_uuid',
+                              version="1.20")

     def test_copy_volume_to_image(self):
         self._test_volume_api('copy_volume_to_image',
@@ -17,6 +17,8 @@ from cinder import context
 from cinder import db
 from cinder.openstack.common import loopingcall

+from oslo_utils import timeutils
+

 def get_test_admin_context():
     return context.get_admin_context()
@@ -61,6 +63,21 @@ def create_volume(ctxt,
     return db.volume_create(ctxt, vol)


+def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
+                  mountpoint, mode='rw'):
+
+    now = timeutils.utcnow()
+    values = {}
+    values['volume_id'] = volume_id
+    values['attached_host'] = attached_host
+    values['mountpoint'] = mountpoint
+    values['attach_time'] = now
+
+    attachment = db.volume_attach(ctxt, values)
+    return db.volume_attached(ctxt, attachment['id'], instance_uuid,
+                              attached_host, mountpoint, mode)
+
+
 def create_snapshot(ctxt,
                     volume_id,
                     display_name='test_snapshot',
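Note: the new tests_utils.attach_volume() helper above combines db.volume_attach() and db.volume_attached() so a test can move a volume to 'in-use' in a single call. Hypothetical usage inside a test method:

    vol = tests_utils.create_volume(self.context, **self.volume_params)
    vol = tests_utils.attach_volume(self.context, vol['id'], instance_uuid,
                                    'fake-host', '/dev/vda')
    self.assertEqual('in-use', vol['status'])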
@@ -162,7 +162,7 @@ class API(base.Base):
                availability_zone=None, source_volume=None,
                scheduler_hints=None,
                source_replica=None, consistencygroup=None,
-               cgsnapshot=None):
+               cgsnapshot=None, multiattach=False):

        # NOTE(jdg): we can have a create without size if we're
        # doing a create from snap or volume. Currently
@@ -237,6 +237,7 @@ class API(base.Base):
             'optional_args': {'is_quota_committed': False},
             'consistencygroup': consistencygroup,
             'cgsnapshot': cgsnapshot,
+            'multiattach': multiattach,
         }
         try:
             if cgsnapshot:
@@ -480,6 +481,13 @@ class API(base.Base):
         volume = self.db.volume_get(context, volume['id'])
         if volume['status'] == 'available':
             self.update(context, volume, {"status": "attaching"})
+        elif volume['status'] == 'in-use':
+            if volume['multiattach']:
+                self.update(context, volume, {"status": "attaching"})
+            else:
+                msg = _("Volume must be multiattachable to reserve again.")
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
         else:
             msg = _("Volume status must be available to reserve.")
             LOG.error(msg)
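Note: reserve_volume() is now a small state check rather than a single status test: 'available' always moves to 'attaching', while 'in-use' moves to 'attaching' only when the volume carries the multiattach flag. A standalone sketch of the accepted transitions (illustration only, not patch code):

    def can_reserve(status, multiattach):
        # Mirrors the checks in reserve_volume() above.
        if status == 'available':
            return True
        if status == 'in-use':
            return multiattach
        return False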
@@ -487,7 +495,13 @@ class API(base.Base):

     @wrap_check_policy
     def unreserve_volume(self, context, volume):
-        if volume['status'] == "attaching":
-            self.update(context, volume, {"status": "available"})
+        volume = self.db.volume_get(context, volume['id'])
+        if volume['status'] == 'attaching':
+            attaches = self.db.volume_attachment_get_used_by_volume_id(
+                context, volume['id'])
+            if attaches:
+                self.update(context, volume, {"status": "in-use"})
+            else:
+                self.update(context, volume, {"status": "available"})

     @wrap_check_policy
@@ -538,8 +552,9 @@ class API(base.Base):
                                              mode)

     @wrap_check_policy
-    def detach(self, context, volume):
-        return self.volume_rpcapi.detach_volume(context, volume)
+    def detach(self, context, volume, attachment_id):
+        return self.volume_rpcapi.detach_volume(context, volume,
+                                                attachment_id)

     @wrap_check_policy
     def initialize_connection(self, context, volume, connector):
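Note: API.detach() now threads an attachment id down to the RPC layer, so callers must say which attachment to remove. A minimal caller sketch (volume_api assumed to be cinder.volume.api.API(); picking the first attachment is purely illustrative):

    attachment = volume['volume_attachment'][0]
    volume_api.detach(context, volume, attachment['id'])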
@@ -771,7 +771,7 @@ class BaseVD(object):
         """Callback for volume attached to instance or host."""
         pass

-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment=None):
         """Callback for volume detached."""
         pass
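Note for driver maintainers: the base detach_volume() callback gains an optional attachment argument that defaults to None, so existing drivers keep working unchanged. A driver wanting per-attachment cleanup could override it along these lines (sketch; MyDriver and its helper methods are hypothetical):

    class MyDriver(BaseVD):
        def detach_volume(self, context, volume, attachment=None):
            if attachment is not None:
                # Tear down only this attachment's export.
                self._remove_export_for_host(volume,
                                             attachment['attached_host'])
            else:
                # Legacy call with no attachment information.
                self._remove_all_exports(volume)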
@@ -172,7 +172,7 @@ class DateraDriver(san.SanISCSIDriver):
     def create_export(self, context, volume):
         return self._do_export(context, volume)

-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment=None):
         try:
             self._issue_api_request('volumes', 'delete', resource=volume['id'],
                                     action='export')
@@ -169,10 +169,11 @@ class HP3PARCommon(object):
         2.0.36 - Added support for dedup provisioning
         2.0.37 - Added support for enabling Flash Cache
         2.0.38 - Add stats for hp3par goodness_function and filter_function
+        2.0.39 - Added support for updated detach_volume attachment.

     """

-    VERSION = "2.0.38"
+    VERSION = "2.0.39"

     stats = {}

@@ -1537,6 +1538,11 @@ class HP3PARCommon(object):
             raise exception.VolumeBackendAPIException(data=msg)

     def attach_volume(self, volume, instance_uuid):
+        """Save the instance UUID in the volume.
+
+        TODO: add support for multi-attach
+
+        """
         LOG.debug("Attach Volume\n%s", pprint.pformat(volume))
         try:
             self.update_volume_key_value_pair(volume,
@@ -1546,7 +1552,12 @@ class HP3PARCommon(object):
             with excutils.save_and_reraise_exception():
                 LOG.error(_LE("Error attaching volume %s"), volume)

-    def detach_volume(self, volume):
+    def detach_volume(self, volume, attachment=None):
+        """Remove the instance uuid from the volume.
+
+        TODO: add support for multi-attach.
+
+        """
         LOG.debug("Detach Volume\n%s", pprint.pformat(volume))
         try:
             self.clear_volume_key_value_pair(volume, 'HPQ-CS-instance_uuid')
@@ -75,10 +75,11 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
         2.0.12 - Fix queryHost call to specify wwns bug #1398206
         2.0.13 - Fix missing host name during attach bug #1398206
         2.0.14 - Removed usage of host name cache #1398914
+        2.0.15 - Added support for updated detach_volume attachment.

     """

-    VERSION = "2.0.14"
+    VERSION = "2.0.15"

     def __init__(self, *args, **kwargs):
         super(HP3PARFCDriver, self).__init__(*args, **kwargs)
@@ -438,10 +439,10 @@ class HP3PARFCDriver(cinder.volume.driver.FibreChannelDriver):
         finally:
             self._logout(common)

-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment=None):
         common = self._login()
         try:
-            common.detach_volume(volume)
+            common.detach_volume(volume, attachment)
         finally:
             self._logout(common)

@@ -82,10 +82,11 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
         2.0.13 - Update LOG usage to fix translations. bug #1384312
         2.0.14 - Do not allow a different iSCSI IP (hp3par_iscsi_ips) to be
                  used during live-migration. bug #1423958
+        2.0.15 - Added support for updated detach_volume attachment.

     """

-    VERSION = "2.0.14"
+    VERSION = "2.0.15"

     def __init__(self, *args, **kwargs):
         super(HP3PARISCSIDriver, self).__init__(*args, **kwargs)
@@ -691,10 +692,10 @@ class HP3PARISCSIDriver(cinder.volume.driver.ISCSIDriver):
         finally:
             self._logout(common)

-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment=None):
         common = self._login()
         try:
-            common.detach_volume(volume)
+            common.detach_volume(volume, attachment)
         finally:
             self._logout(common)

@@ -220,7 +220,7 @@ class ScalityDriver(driver.VolumeDriver):
         """Disallow connection from connector."""
         pass

-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment=None):
         """Callback for volume detached."""
         pass

@@ -923,7 +923,7 @@ class SolidFireDriver(san.SanISCSIDriver):
         if 'result' not in data:
             raise exception.SolidFireAPIDataException(data=data)

-    def detach_volume(self, context, volume):
+    def detach_volume(self, context, volume, attachment=None):

         LOG.debug("Entering SolidFire attach_volume...")
         sfaccount = self._get_sfaccount(volume['project_id'])
@@ -482,7 +482,7 @@ class EntryCreateTask(flow_utils.CinderTask):
                     'name', 'reservations', 'size', 'snapshot_id',
                     'source_volid', 'volume_type_id', 'encryption_key_id',
                     'source_replicaid', 'consistencygroup_id',
-                    'cgsnapshot_id', ]
+                    'cgsnapshot_id', 'multiattach']
         super(EntryCreateTask, self).__init__(addons=[ACTION],
                                               requires=requires)
         self.db = db
@@ -508,6 +508,7 @@ class EntryCreateTask(flow_utils.CinderTask):
             'display_description': kwargs.pop('description'),
             'display_name': kwargs.pop('name'),
             'replication_status': 'disabled',
+            'multiattach': kwargs.pop('multiattach'),
         }

         # Merge in the other required arguments which should provide the rest
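Note: with 'multiattach' added to the task requirements and to the volume record built here, the flag passed on the create request is persisted on the new volume row. A hypothetical caller, mirroring the API signature change earlier in this diff:

    volume = volume_api.create(context, size=1, name='shared-data',
                               description='shared by several instances',
                               multiattach=True)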
@@ -137,6 +137,25 @@ def locked_volume_operation(f):
     return lvo_inner1


+def locked_detach_operation(f):
+    """Lock decorator for volume detach operations.
+
+    Takes a named lock prior to executing the detach call. The lock is named
+    with the operation executed and the id of the volume. This lock can then
+    be used by other operations to avoid operation conflicts on shared volumes.
+
+    This locking mechanism is only for detach calls. We can't use the
+    locked_volume_operation, because detach requires an additional
+    attachment_id in the parameter list.
+    """
+    def ldo_inner1(inst, context, volume_id, attachment_id=None, **kwargs):
+        @utils.synchronized("%s-%s" % (volume_id, f.__name__), external=True)
+        def ldo_inner2(*_args, **_kwargs):
+            return f(*_args, **_kwargs)
+        return ldo_inner2(inst, context, volume_id, attachment_id, **kwargs)
+    return ldo_inner1
+
+
 def locked_snapshot_operation(f):
     """Lock decorator for snapshot operations.

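Note: the lock is named "<volume_id>-<decorated function name>", so concurrent detaches of one volume serialize while other volumes proceed in parallel; the only difference from locked_volume_operation is the extra attachment_id parameter. As it is applied later in this patch (sketch):

    @locked_detach_operation
    def detach_volume(self, context, volume_id, attachment_id=None):
        # Runs under the external lock '<volume_id>-detach_volume'.
        ...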
@@ -162,7 +181,7 @@ def locked_snapshot_operation(f):
 class VolumeManager(manager.SchedulerDependentManager):
     """Manages attachable block storage devices."""

-    RPC_API_VERSION = '1.22'
+    RPC_API_VERSION = '1.23'

     target = messaging.Target(version=RPC_API_VERSION)

@@ -692,45 +711,46 @@ class VolumeManager(manager.SchedulerDependentManager):
             volume_metadata = self.db.volume_admin_metadata_get(
                 context.elevated(), volume_id)
             if volume['status'] == 'attaching':
-                if (volume['instance_uuid'] and volume['instance_uuid'] !=
-                        instance_uuid):
-                    msg = _("being attached by another instance")
-                    raise exception.InvalidVolume(reason=msg)
-                if (volume['attached_host'] and volume['attached_host'] !=
-                        host_name):
-                    msg = _("being attached by another host")
-                    raise exception.InvalidVolume(reason=msg)
                 if (volume_metadata.get('attached_mode') and
                         volume_metadata.get('attached_mode') != mode):
                     msg = _("being attached by different mode")
                     raise exception.InvalidVolume(reason=msg)
-            elif (not volume['migration_status'] and
-                  volume['status'] != "available"):
-                msg = _("status must be available or attaching")
+
+            if (volume['status'] == 'in-use' and not volume['multiattach']
+                    and not volume['migration_status']):
+                msg = _("volume is already attached")
                 raise exception.InvalidVolume(reason=msg)

-            # TODO(jdg): attach_time column is currently varchar
-            # we should update this to a date-time object
-            # also consider adding detach_time?
-            self._notify_about_volume_usage(context, volume,
-                                            "attach.start")
-            self.db.volume_update(context, volume_id,
-                                  {"instance_uuid": instance_uuid,
-                                   "attached_host": host_name,
-                                   "status": "attaching",
-                                   "attach_time": timeutils.strtime()})
-            self.db.volume_admin_metadata_update(context.elevated(),
-                                                 volume_id,
-                                                 {"attached_mode": mode},
-                                                 False)
-
-            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
-                self.db.volume_update(context, volume_id,
-                                      {'status': 'error_attaching'})
-                raise exception.InvalidUUID(uuid=instance_uuid)
-
+            attachment = None
             host_name_sanitized = utils.sanitize_hostname(
                 host_name) if host_name else None
+            if instance_uuid:
+                attachment = \
+                    self.db.volume_attachment_get_by_instance_uuid(
+                        context, volume_id, instance_uuid)
+            else:
+                attachment = \
+                    self.db.volume_attachment_get_by_host(context, volume_id,
+                                                          host_name_sanitized)
+            if attachment is not None:
+                return
+
+            self._notify_about_volume_usage(context, volume,
+                                            "attach.start")
+            values = {'volume_id': volume_id,
+                      'attach_status': 'attaching', }
+
+            attachment = self.db.volume_attach(context.elevated(), values)
+            volume_metadata = self.db.volume_admin_metadata_update(
+                context.elevated(), volume_id,
+                {"attached_mode": mode}, False)
+
+            attachment_id = attachment['id']
+            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
+                self.db.volume_attachment_update(context, attachment_id,
+                                                 {'attach_status':
+                                                  'error_attaching'})
+                raise exception.InvalidUUID(uuid=instance_uuid)

             volume = self.db.volume_get(context, volume_id)

@@ -739,6 +759,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                                       {'status': 'error_attaching'})
                 raise exception.InvalidVolumeAttachMode(mode=mode,
                                                         volume_id=volume_id)
+
             try:
                 # NOTE(flaper87): Verify the driver is enabled
                 # before going forward. The exception will be caught
@@ -752,24 +773,55 @@ class VolumeManager(manager.SchedulerDependentManager):
                                          mountpoint)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    self.db.volume_update(context, volume_id,
-                                          {'status': 'error_attaching'})
+                    self.db.volume_attachment_update(
+                        context, attachment_id,
+                        {'attach_status': 'error_attaching'})

             volume = self.db.volume_attached(context.elevated(),
-                                             volume_id,
+                                             attachment_id,
                                              instance_uuid,
                                              host_name_sanitized,
-                                             mountpoint)
+                                             mountpoint,
+                                             mode)
             if volume['migration_status']:
                 self.db.volume_update(context, volume_id,
                                       {'migration_status': None})
             self._notify_about_volume_usage(context, volume, "attach.end")
+            return self.db.volume_attachment_get(context, attachment_id)
         return do_attach()

-    @locked_volume_operation
-    def detach_volume(self, context, volume_id):
+    @locked_detach_operation
+    def detach_volume(self, context, volume_id, attachment_id=None):
         """Updates db to show volume is detached."""
         # TODO(vish): refactor this into a more general "unreserve"
+        attachment = None
+        if attachment_id:
+            try:
+                attachment = self.db.volume_attachment_get(context,
+                                                           attachment_id)
+            except exception.VolumeAttachmentNotFound:
+                LOG.error(_LE("We couldn't find the volume attachment"
+                              " for volume %(volume_id)s and"
+                              " attachment id %(id)s"),
+                          {"volume_id": volume_id,
+                           "id": attachment_id})
+                raise
+        else:
+            # We can try and degrade gracefully here by trying to detach
+            # a volume without the attachment_id if the volume only has
+            # one attachment. This is for backwards compatibility.
+            attachments = self.db.volume_attachment_get_used_by_volume_id(
+                context, volume_id)
+            if len(attachments) > 1:
+                # There is more than one attachment for this volume,
+                # so we have to have an attachment id.
+                msg = _("Volume %(id)s is attached to more than one instance"
+                        ". A valid attachment_id must be passed to detach"
+                        " this volume") % {'id': volume_id}
+                LOG.error(msg)
+                raise exception.InvalidVolume(reason=msg)
+            else:
+                attachment = attachments[0]

         volume = self.db.volume_get(context, volume_id)
         self._notify_about_volume_usage(context, volume, "detach.start")
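Note: the else branch above keeps old clients working: a detach without attachment_id is accepted while the volume has exactly one attachment and rejected once it has several. In short (mgr assumed to be a VolumeManager instance):

    mgr.detach_volume(ctxt, volume_id, attachment_id)  # always unambiguous
    mgr.detach_volume(ctxt, volume_id)  # ok with one attachment; raises
                                        # InvalidVolume with more than one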
@@ -779,14 +831,15 @@ class VolumeManager(manager.SchedulerDependentManager):
             # and the volume status updated.
             utils.require_driver_initialized(self.driver)

-            self.driver.detach_volume(context, volume)
+            self.driver.detach_volume(context, volume, attachment)
         except Exception:
             with excutils.save_and_reraise_exception():
-                self.db.volume_update(context,
-                                      volume_id,
-                                      {'status': 'error_detaching'})
+                self.db.volume_attachment_update(
+                    context, attachment.get('id'),
+                    {'attach_status': 'error_detaching'})

-        self.db.volume_detached(context.elevated(), volume_id)
+        self.db.volume_detached(context.elevated(), volume_id,
+                                attachment.get('id'))
         self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
                                              'attached_mode')

@@ -851,8 +904,7 @@ class VolumeManager(manager.SchedulerDependentManager):
             with excutils.save_and_reraise_exception():
                 payload['message'] = unicode(error)
         finally:
-            if (volume['instance_uuid'] is None and
-                    volume['attached_host'] is None):
+            if not volume['volume_attachment']:
                 self.db.volume_update(context, volume_id,
                                       {'status': 'available'})
             else:
@@ -1144,8 +1196,8 @@ class VolumeManager(manager.SchedulerDependentManager):

         # Copy the source volume to the destination volume
         try:
-            if (volume['instance_uuid'] is None and
-                    volume['attached_host'] is None):
+            attachments = volume['volume_attachment']
+            if not attachments:
                 self.driver.copy_volume_data(ctxt, volume, new_volume,
                                              remote='dest')
                 # The above call is synchronous so we complete the migration
@@ -1156,8 +1208,11 @@ class VolumeManager(manager.SchedulerDependentManager):
                 nova_api = compute.API()
                 # This is an async call to Nova, which will call the completion
                 # when it's done
-                nova_api.update_server_volume(ctxt, volume['instance_uuid'],
-                                              volume['id'], new_volume['id'])
+                for attachment in attachments:
+                    instance_uuid = attachment['instance_uuid']
+                    nova_api.update_server_volume(ctxt, instance_uuid,
+                                                  volume['id'],
+                                                  new_volume['id'])
         except Exception:
             with excutils.save_and_reraise_exception():
                 msg = _("Failed to copy volume %(vol1)s to %(vol2)s")
@@ -1190,8 +1245,8 @@ class VolumeManager(manager.SchedulerDependentManager):
                      {'vol': new_volume['id']})

     def _get_original_status(self, volume):
-        if (volume['instance_uuid'] is None and
-                volume['attached_host'] is None):
+        attachments = volume['volume_attachment']
+        if not attachments:
             return 'available'
         else:
             return 'in-use'
@@ -1255,11 +1310,12 @@ class VolumeManager(manager.SchedulerDependentManager):
         self.db.volume_update(ctxt, volume_id, updates)

         if orig_volume_status == 'in-use':
-            rpcapi.attach_volume(ctxt,
-                                 volume,
-                                 volume['instance_uuid'],
-                                 volume['attached_host'],
-                                 volume['mountpoint'],
-                                 'rw')
+            attachments = volume['volume_attachment']
+            for attachment in attachments:
+                rpcapi.attach_volume(ctxt, volume,
+                                     attachment['instance_uuid'],
+                                     attachment['attached_host'],
+                                     attachment['mountpoint'],
+                                     'rw')
         return volume['id']

@@ -63,6 +63,7 @@ class VolumeAPI(object):
                and delete_snapshot()
         1.21 - Adds update_consistencygroup.
         1.22 - Adds create_consistencygroup_from_src.
+        1.23 - Adds attachment_id to detach_volume
     '''

     BASE_RPC_API_VERSION = '1.0'
@@ -72,7 +73,7 @@ class VolumeAPI(object):
         target = messaging.Target(topic=CONF.volume_topic,
                                   version=self.BASE_RPC_API_VERSION)
         serializer = objects_base.CinderObjectSerializer()
-        self.client = rpc.get_client(target, '1.22', serializer=serializer)
+        self.client = rpc.get_client(target, '1.23', serializer=serializer)

     def create_consistencygroup(self, ctxt, group, host):
         new_host = utils.extract_host(host)
@@ -171,10 +172,11 @@ class VolumeAPI(object):
                           mountpoint=mountpoint,
                           mode=mode)

-    def detach_volume(self, ctxt, volume):
+    def detach_volume(self, ctxt, volume, attachment_id):
         new_host = utils.extract_host(volume['host'])
-        cctxt = self.client.prepare(server=new_host)
-        return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'])
+        cctxt = self.client.prepare(server=new_host, version='1.20')
+        return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'],
+                          attachment_id=attachment_id)

     def copy_volume_to_image(self, ctxt, volume, image_meta):
         new_host = utils.extract_host(volume['host'])
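Note: detach_volume() pins its call at version '1.20' even though the client is built for 1.23; minor RPC versions are backwards compatible, so a manager running 1.23 should still accept the pinned call. What the volume API layer now emits (sketch; attachment_id may be None for legacy single-attachment detaches):

    rpcapi = volume_rpcapi.VolumeAPI()
    rpcapi.detach_volume(context, volume, attachment_id)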
@@ -46,7 +46,6 @@ def _usage_from_volume(context, volume_ref, **kw):
     usage_info = dict(tenant_id=volume_ref['project_id'],
                       host=volume_ref['host'],
                       user_id=volume_ref['user_id'],
-                      instance_uuid=volume_ref['instance_uuid'],
                       availability_zone=volume_ref['availability_zone'],
                       volume_id=volume_ref['id'],
                       volume_type=volume_ref['volume_type_id'],