Add volume multi attach support

This patch includes the Cinder changes needed
to support multiple volume attachments.  Nova and
python-cinderclient also need the associated
patches in order to support multiple attachments.

This adds the multiattach flag to volumes.  When a
volume is created, a multiattach flag can be set,
which allows a volume to be attached to more than
one Nova instance or host.  If the multiattach flag is
not set on a volume, it cannot be attached to more
than one Nova instance or host.

Each volume attachment is tracked in a
new volume_attachment table.  The attachment id is
the unique identifier for each attachment to an
instance or host.

When a volume is to be detached, the attachment
id must be passed in to the detach call in
order to determine which attachment should be
removed.  Since a volume can be attached to an
instance and a host, the attachment id is used
as the attachment identifier.

Nova:
https://review.openstack.org/#/c/153033/
https://review.openstack.org/#/c/153038/

python-cinderclient:
https://review.openstack.org/#/c/85856/

Change-Id: I950fa00ed5a30e7758245d5b0557f6df42dc58a3
Implements: blueprint multi-attach-volume
APIImpact
changes/47/85847/47
Walter A. Boring IV 8 years ago
parent 490f03b48a
commit 10d5421687

@ -185,7 +185,10 @@ class VolumeAdminController(AdminController):
raise exc.HTTPNotFound()
self.volume_api.terminate_connection(context, volume,
{}, force=True)
self.volume_api.detach(context, volume)
attachment_id = body['os-force_detach'].get('attachment_id', None)
self.volume_api.detach(context, volume, attachment_id)
return webob.Response(status_int=202)
@wsgi.action('os-migrate_volume')

@ -127,7 +127,11 @@ class VolumeActionsController(wsgi.Controller):
except exception.VolumeNotFound as error:
raise webob.exc.HTTPNotFound(explanation=error.msg)
self.volume_api.detach(context, volume)
attachment_id = None
if body['os-detach']:
attachment_id = body['os-detach'].get('attachment_id', None)
self.volume_api.detach(context, volume, attachment_id)
return webob.Response(status_int=202)
@wsgi.action('os-reserve')

@ -48,18 +48,18 @@ def _translate_attachment_detail_view(_context, vol):
def _translate_attachment_summary_view(_context, vol):
"""Maps keys for attachment summary view."""
d = {}
volume_id = vol['id']
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volume_id'] = volume_id
d['server_id'] = vol['instance_uuid']
d['host_name'] = vol['attached_host']
if vol.get('mountpoint'):
d['device'] = vol['mountpoint']
d = []
attachments = vol.get('volume_attachment', [])
for attachment in attachments:
if attachment.get('attach_status') == 'attached':
a = {'id': attachment.get('volume_id'),
'attachment_id': attachment.get('id'),
'volume_id': attachment.get('volume_id'),
'server_id': attachment.get('instance_uuid'),
'host_name': attachment.get('attached_host'),
'device': attachment.get('mountpoint'),
}
d.append(a)
return d
@ -91,10 +91,14 @@ def _translate_volume_summary_view(context, vol, image_id=None):
else:
d['bootable'] = 'false'
if vol['multiattach']:
d['multiattach'] = 'true'
else:
d['multiattach'] = 'false'
d['attachments'] = []
if vol['attach_status'] == 'attached':
attachment = _translate_attachment_detail_view(context, vol)
d['attachments'].append(attachment)
d['attachments'] = _translate_attachment_detail_view(context, vol)
d['display_name'] = vol['display_name']
d['display_description'] = vol['display_description']
@ -146,6 +150,7 @@ def make_volume(elem):
elem.set('volume_type')
elem.set('snapshot_id')
elem.set('source_volid')
elem.set('multiattach')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
@ -373,6 +378,8 @@ class VolumeController(wsgi.Controller):
size = kwargs['source_volume']['size']
LOG.info(_LI("Create volume of %s GB"), size, context=context)
multiattach = volume.get('multiattach', False)
kwargs['multiattach'] = multiattach
image_href = None
image_uuid = None

@ -70,7 +70,8 @@ class ViewBuilder(common.ViewBuilder):
'bootable': str(volume.get('bootable')).lower(),
'encrypted': self._is_volume_encrypted(volume),
'replication_status': volume.get('replication_status'),
'consistencygroup_id': volume.get('consistencygroup_id')
'consistencygroup_id': volume.get('consistencygroup_id'),
'multiattach': volume.get('multiattach')
}
}
@ -83,19 +84,17 @@ class ViewBuilder(common.ViewBuilder):
attachments = []
if volume['attach_status'] == 'attached':
d = {}
volume_id = volume['id']
# note(justinsb): we use the volume id as the id of the attachments
# object
d['id'] = volume_id
d['volume_id'] = volume_id
d['server_id'] = volume['instance_uuid']
d['host_name'] = volume['attached_host']
if volume.get('mountpoint'):
d['device'] = volume['mountpoint']
attachments.append(d)
attaches = volume.get('volume_attachment', [])
for attachment in attaches:
if attachment.get('attach_status') == 'attached':
a = {'id': attachment.get('volume_id'),
'attachment_id': attachment.get('id'),
'volume_id': attachment.get('volume_id'),
'server_id': attachment.get('instance_uuid'),
'host_name': attachment.get('attached_host'),
'device': attachment.get('mountpoint'),
}
attachments.append(a)
return attachments

@ -44,6 +44,7 @@ SCHEDULER_HINTS_NAMESPACE =\
def make_attachment(elem):
elem.set('id')
elem.set('attachment_id')
elem.set('server_id')
elem.set('host_name')
elem.set('volume_id')
@ -63,6 +64,7 @@ def make_volume(elem):
elem.set('snapshot_id')
elem.set('source_volid')
elem.set('consistencygroup_id')
elem.set('multiattach')
attachments = xmlutil.SubTemplateElement(elem, 'attachments')
attachment = xmlutil.SubTemplateElement(attachments, 'attachment',
@ -412,6 +414,8 @@ class VolumeController(wsgi.Controller):
kwargs['availability_zone'] = volume.get('availability_zone', None)
kwargs['scheduler_hints'] = volume.get('scheduler_hints', None)
multiattach = volume.get('multiattach', False)
kwargs['multiattach'] = multiattach
new_volume = self.volume_api.create(context,
size,

@ -197,18 +197,28 @@ class BackupManager(manager.SchedulerDependentManager):
for volume in volumes:
volume_host = volume_utils.extract_host(volume['host'], 'backend')
backend = self._get_volume_backend(host=volume_host)
if volume['status'] == 'backing-up':
LOG.info(_LI('Resetting volume %s to available '
'(was backing-up).') % volume['id'])
mgr = self._get_manager(backend)
mgr.detach_volume(ctxt, volume['id'])
if volume['status'] == 'restoring-backup':
LOG.info(_LI('Resetting volume %s to error_restoring '
'(was restoring-backup).') % volume['id'])
mgr = self._get_manager(backend)
mgr.detach_volume(ctxt, volume['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
attachments = volume['volume_attachment']
if attachments:
if volume['status'] == 'backing-up':
LOG.info(_LI('Resetting volume %s to available '
'(was backing-up).'), volume['id'])
mgr = self._get_manager(backend)
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
mgr.detach_volume(ctxt, volume['id'],
attachment['id'])
if volume['status'] == 'restoring-backup':
LOG.info(_LI('setting volume %s to error_restoring '
'(was restoring-backup).'), volume['id'])
mgr = self._get_manager(backend)
for attachment in attachments:
if (attachment['attached_host'] == self.host and
attachment['instance_uuid'] is None):
mgr.detach_volume(ctxt, volume['id'],
attachment['id'])
self.db.volume_update(ctxt, volume['id'],
{'status': 'error_restoring'})
# TODO(smulcahy) implement full resume of backup and restore
# operations on restart (rather than simply resetting)

@ -136,10 +136,16 @@ def iscsi_target_create_safe(context, values):
###############
def volume_attached(context, volume_id, instance_id, host_name, mountpoint):
def volume_attach(context, values):
    """Create a new volume attachment record.

    :param context: request context
    :param values: dict of attachment fields (e.g. ``volume_id``,
        ``instance_uuid`` or ``attached_host``, ``mountpoint``); the
        backend generates an ``id`` if one is not supplied.
    :returns: the created volume attachment record
    """
    return IMPL.volume_attach(context, values)
def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
                    attach_mode='rw'):
    """Ensure that a volume is set as attached.

    :param attach_mode: 'rw' or 'ro'; defaults to 'rw' for backward
        compatibility with pre-multiattach callers.
    """
    # The stale pre-multiattach call (which omitted attach_mode) has been
    # removed; only the new argument list is forwarded to the backend.
    return IMPL.volume_attached(context, volume_id, instance_id, host_name,
                                mountpoint, attach_mode)
def volume_create(context, values):
@ -169,9 +175,9 @@ def volume_destroy(context, volume_id):
return IMPL.volume_destroy(context, volume_id)
def volume_detached(context, volume_id, attachment_id):
    """Ensure that a volume is set as detached.

    :param volume_id: id of the volume the attachment belongs to
    :param attachment_id: id of the volume_attachment row to remove
    """
    # The old two-argument form has been removed; the attachment id is
    # now required to identify which attachment to detach.
    return IMPL.volume_detached(context, volume_id, attachment_id)
def volume_get(context, volume_id):
@ -219,6 +225,27 @@ def volume_update(context, volume_id, values):
return IMPL.volume_update(context, volume_id, values)
def volume_attachment_update(context, attachment_id, values):
    """Apply ``values`` to an existing volume attachment record."""
    return IMPL.volume_attachment_update(context, attachment_id, values)
def volume_attachment_get(context, attachment_id, session=None):
    """Fetch a single volume attachment by its id."""
    return IMPL.volume_attachment_get(context, attachment_id, session)
def volume_attachment_get_used_by_volume_id(context, volume_id):
    """List the attachments currently in use for the given volume."""
    return IMPL.volume_attachment_get_used_by_volume_id(context, volume_id)
def volume_attachment_get_by_host(context, volume_id, host):
    """Fetch the attachment of the given volume on the given host."""
    return IMPL.volume_attachment_get_by_host(context, volume_id, host)
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
    """Fetch the attachment of the given volume on the given instance."""
    return IMPL.volume_attachment_get_by_instance_uuid(context, volume_id,
                                                       instance_uuid)
####################

@ -975,18 +975,51 @@ def reservation_expire(context):
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint):
def volume_attach(context, values):
    """Create a volume_attachment row for a new attach request.

    Generates a UUID for the attachment if the caller did not supply
    one, persists the row, and returns the freshly loaded record.
    """
    volume_attachment_ref = models.VolumeAttachment()
    if not values.get('id'):
        values['id'] = str(uuid.uuid4())

    volume_attachment_ref.update(values)
    session = get_session()
    with session.begin():
        volume_attachment_ref.save(session=session)
        # Re-read through volume_attachment_get so the caller receives a
        # fully loaded record bound to this session.
        return volume_attachment_get(context, values['id'],
                                     session=session)
@require_admin_context
def volume_attached(context, attachment_id, instance_uuid, host_name,
                    mountpoint, attach_mode='rw'):
    """This method updates a volume attachment entry.

    This function saves the information related to a particular
    attachment for a volume.  It also updates the volume record
    to mark the volume as attached.

    :param attachment_id: id of the volume_attachment row to update
    :param instance_uuid: Nova instance the volume is attached to, if any
    :param host_name: host the volume is attached to, if any
    :param attach_mode: 'rw' or 'ro'
    :returns: the updated volume record
    """
    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InvalidUUID(uuid=instance_uuid)

    session = get_session()
    with session.begin():
        volume_attachment_ref = volume_attachment_get(context, attachment_id,
                                                      session=session)

        volume_attachment_ref['mountpoint'] = mountpoint
        volume_attachment_ref['attach_status'] = 'attached'
        volume_attachment_ref['instance_uuid'] = instance_uuid
        volume_attachment_ref['attached_host'] = host_name
        volume_attachment_ref['attach_time'] = timeutils.utcnow()
        volume_attachment_ref['attach_mode'] = attach_mode

        # Look the volume up through the attachment so we no longer need
        # a volume_id argument; the stale pre-multiattach lines that
        # fetched by volume_id and wrote mountpoint/instance_uuid/
        # attached_host onto the volume row have been removed.
        volume_ref = _volume_get(context, volume_attachment_ref['volume_id'],
                                 session=session)
        volume_attachment_ref.save(session=session)

        volume_ref['status'] = 'in-use'
        volume_ref['attach_status'] = 'attached'
        volume_ref.save(session=session)
        return volume_ref
@ -1134,18 +1167,57 @@ def volume_destroy(context, volume_id):
@require_admin_context
def volume_detached(context, volume_id):
def volume_detach(context, attachment_id):
    """Mark a single volume attachment as 'detaching'.

    Only the attachment row is touched here; the volume record itself is
    updated later by volume_detached() once the detach completes.
    """
    session = get_session()
    with session.begin():
        volume_attachment_ref = volume_attachment_get(context, attachment_id,
                                                      session=session)
        volume_attachment_ref['attach_status'] = 'detaching'
        volume_attachment_ref.save(session=session)
@require_admin_context
def volume_detached(context, volume_id, attachment_id):
    """This updates a volume attachment and marks it as detached.

    This method also ensures that the volume entry is correctly
    marked as either still attached/in-use or detached/available
    if this was the last detachment made.

    :param volume_id: id of the volume the attachment belongs to
    :param attachment_id: id of the volume_attachment row to remove
    """
    session = get_session()
    with session.begin():
        attachment = volume_attachment_get(context, attachment_id,
                                           session=session)

        # If this is already detached, attachment will be None
        # NOTE(review): volume_attachment_get raises
        # VolumeAttachmentNotFound instead of returning None -- confirm
        # whether callers rely on that exception or on this guard.
        if attachment:
            now = timeutils.utcnow()
            attachment['attach_status'] = 'detached'
            attachment['detach_time'] = now
            attachment['deleted'] = True
            attachment['deleted_at'] = now
            attachment.save(session=session)

        # Any remaining live attachments mean the volume must stay in-use.
        attachment_list = volume_attachment_get_used_by_volume_id(
            context, volume_id, session=session)
        remain_attachment = False
        if attachment_list and len(attachment_list) > 0:
            remain_attachment = True

        # The stale pre-multiattach lines that unconditionally cleared
        # mountpoint/instance_uuid/attached_host/attach_time on the
        # volume row have been removed; the branch below is the new
        # single source of truth for the volume's status.
        volume_ref = _volume_get(context, volume_id, session=session)
        if not remain_attachment:
            # Hide status update from user if we're performing volume migration
            if not volume_ref['migration_status']:
                volume_ref['status'] = 'available'

            volume_ref['attach_status'] = 'detached'
            volume_ref.save(session=session)
        else:
            # Volume is still attached
            volume_ref['status'] = 'in-use'
            volume_ref['attach_status'] = 'attached'
            volume_ref.save(session=session)
@require_context
@ -1156,12 +1228,14 @@ def _volume_get_query(context, session=None, project_only=False):
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type')).\
options(joinedload('volume_attachment')).\
options(joinedload('consistencygroup'))
@ -1177,6 +1251,54 @@ def _volume_get(context, volume_id, session=None):
return result
@require_context
def volume_attachment_get(context, attachment_id, session=None):
    """Fetch a single volume attachment row by its id.

    :raises: VolumeAttachmentNotFound if no row matches attachment_id.
    """
    result = model_query(context, models.VolumeAttachment,
                         session=session).\
        filter_by(id=attachment_id).\
        first()
    if not result:
        raise exception.VolumeAttachmentNotFound(filter='attachment_id = %s' %
                                                 attachment_id)
    return result
@require_context
def volume_attachment_get_used_by_volume_id(context, volume_id, session=None):
    """Return all non-detached attachment rows for the given volume."""
    result = model_query(context, models.VolumeAttachment,
                         session=session).\
        filter_by(volume_id=volume_id).\
        filter(models.VolumeAttachment.attach_status != 'detached').\
        all()
    return result
@require_context
def volume_attachment_get_by_host(context, volume_id, host):
    """Return the first non-detached attachment of a volume on a host.

    Returns None when the volume has no live attachment on that host.
    """
    session = get_session()
    with session.begin():
        result = model_query(context, models.VolumeAttachment,
                             session=session).\
            filter_by(volume_id=volume_id).\
            filter_by(attached_host=host).\
            filter(models.VolumeAttachment.attach_status != 'detached').\
            first()
        return result
@require_context
def volume_attachment_get_by_instance_uuid(context, volume_id, instance_uuid):
    """Return the first non-detached attachment of a volume on an instance.

    Returns None when the volume has no live attachment on that instance.
    """
    session = get_session()
    with session.begin():
        result = model_query(context, models.VolumeAttachment,
                             session=session).\
            filter_by(volume_id=volume_id).\
            filter_by(instance_uuid=instance_uuid).\
            filter(models.VolumeAttachment.attach_status != 'detached').\
            first()
        return result
@require_context
def volume_get(context, volume_id):
return _volume_get(context, volume_id)
@ -1544,6 +1666,17 @@ def volume_update(context, volume_id, values):
return volume_ref
@require_context
def volume_attachment_update(context, attachment_id, values):
    """Apply ``values`` to an attachment row and return the saved record."""
    session = get_session()
    with session.begin():
        volume_attachment_ref = volume_attachment_get(context, attachment_id,
                                                      session=session)
        volume_attachment_ref.update(values)
        volume_attachment_ref.save(session=session)
        return volume_attachment_ref
####################
def _volume_x_metadata_get_query(context, volume_id, model, session=None):

@ -0,0 +1,147 @@
# (c) Copyright 2012-2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import six
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, MetaData, String, Table
from cinder.i18n import _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CREATED_AT = datetime.datetime.now()
def upgrade(migrate_engine):
    """Add volume multi attachment table.

    Adds the ``multiattach`` flag to ``volumes``, creates the new
    ``volume_attachment`` table, copies each currently-attached
    volume's attachment info into it, then drops the now-redundant
    attachment columns from ``volumes``.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # add the multiattach flag to the volumes table.
    volumes = Table('volumes', meta, autoload=True)
    multiattach = Column('multiattach', Boolean)
    volumes.create_column(multiattach)
    volumes.update().values(multiattach=False).execute()

    # The new volume_attachment table
    volume_attachment = Table(
        'volume_attachment', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('id', String(length=36), primary_key=True, nullable=False),
        Column('volume_id', String(length=36), ForeignKey('volumes.id'),
               nullable=False),
        Column('attached_host', String(length=255)),
        Column('instance_uuid', String(length=36)),
        Column('mountpoint', String(length=255)),
        Column('attach_time', DateTime),
        Column('detach_time', DateTime),
        Column('attach_mode', String(length=36)),
        Column('attach_status', String(length=255)),
        mysql_engine='InnoDB'
    )

    try:
        volume_attachment.create()
    except Exception:
        LOG.error(_LE("Table volume_attachment not created!"))
        raise

    # now migrate existing volume attachment info into the
    # new volume_attachment table
    volumes_list = list(volumes.select().execute())
    for volume in volumes_list:
        if volume.attach_status == 'attached':
            attachment = volume_attachment.insert()
            values = {'id': six.text_type(uuid.uuid4()),
                      'created_at': CREATED_AT,
                      'deleted_at': None,
                      'deleted': 0,
                      # NOTE(review): this copies the Cinder service host
                      # (volume.host), not the volumes.attached_host column
                      # being dropped below -- confirm this is intentional.
                      'volume_id': volume.id,
                      'attached_host': volume.host,
                      'instance_uuid': volume.instance_uuid,
                      # NOTE(review): volumes.attach_time was a string
                      # column; inserting it into the DateTime attach_time
                      # column may need conversion -- verify per backend.
                      'mountpoint': volume.mountpoint,
                      'attach_time': volume.attach_time,
                      'attach_mode': 'rw',
                      'attach_status': 'attached',
                      }
            attachment.execute(values)

    # we have no reason to keep the columns that now
    # exist in the volume_attachment table
    mountpoint = volumes.columns.mountpoint
    volumes.drop_column(mountpoint)
    instance_uuid = volumes.columns.instance_uuid
    volumes.drop_column(instance_uuid)
    attach_time = volumes.columns.attach_time
    volumes.drop_column(attach_time)
    attached_host = volumes.columns.attached_host
    volumes.drop_column(attached_host)
def downgrade(migrate_engine):
    """Remove volume_attachment table.

    Drops the ``multiattach`` flag, re-creates the per-volume
    attachment columns on ``volumes``, copies attachment data back
    (last non-deleted attachment wins), and drops the
    ``volume_attachment`` table.
    """
    meta = MetaData()
    meta.bind = migrate_engine

    # Put the needed volumes table columns back
    volumes = Table('volumes', meta, autoload=True)
    multiattach = volumes.columns.multiattach
    volumes.drop_column(multiattach)

    attached_host = Column('attached_host', String(length=255))
    volumes.create_column(attached_host)
    volumes.update().values(attached_host=None).execute()

    attach_time = Column('attach_time', String(length=255))
    volumes.create_column(attach_time)
    volumes.update().values(attach_time=None).execute()

    instance_uuid = Column('instance_uuid', String(length=36))
    volumes.create_column(instance_uuid)
    volumes.update().values(instance_uuid=None).execute()

    mountpoint = Column('mountpoint', String(length=255))
    volumes.create_column(mountpoint)
    volumes.update().values(mountpoint=None).execute()

    volume_attachment = Table('volume_attachment', meta, autoload=True)
    attachments = list(volume_attachment.select().execute())
    for attachment in attachments:
        # we are going to lose data here for
        # multiple attaches.  We'll migrate and the
        # last update wins.
        if not attachment.deleted_at:
            volume_id = attachment.volume_id
            volumes.update().\
                where(volumes.c.id == volume_id).\
                values(mountpoint=attachment.mountpoint,
                       attached_host=attachment.attached_host,
                       attach_time=attachment.attach_time,
                       instance_uuid=attachment.instance_uuid).\
                execute()

    try:
        volume_attachment.drop()
    except Exception:
        LOG.error(_LE("Dropping volume_attachment table failed."))
        raise

@ -0,0 +1,87 @@
-- SQLite downgrade helper: SQLite cannot DROP COLUMN, so rebuild the
-- volumes table with the attachment columns restored, folding the
-- volume_attachment rows back into it.
BEGIN TRANSACTION;
CREATE TABLE volumes_v39 (
    created_at DATETIME,
    updated_at DATETIME,
    deleted_at DATETIME,
    deleted BOOLEAN,
    id VARCHAR(36) NOT NULL,
    ec2_id INTEGER,
    user_id VARCHAR(255),
    project_id VARCHAR(255),
    snapshot_id VARCHAR(36),
    host VARCHAR(255),
    size INTEGER,
    availability_zone VARCHAR(255),
    status VARCHAR(255),
    attach_status VARCHAR(255),
    scheduled_at DATETIME,
    launched_at DATETIME,
    terminated_at DATETIME,
    display_name VARCHAR(255),
    display_description VARCHAR(255),
    provider_location VARCHAR(255),
    provider_auth VARCHAR(255),
    volume_type_id VARCHAR(36),
    source_volid VARCHAR(36),
    bootable INTEGER,
    provider_geometry VARCHAR(255),
    _name_id VARCHAR(36),
    encryption_key_id VARCHAR(36),
    migration_status VARCHAR(255),
    -- per-volume attachment columns restored from volume_attachment
    attached_host VARCHAR(255),
    attach_time VARCHAR(255),
    instance_uuid VARCHAR(36),
    mountpoint VARCHAR(255),
    consistencygroup_id VARCHAR(36),
    replication_status VARCHAR(255),
    replication_extended_status VARCHAR(255),
    replication_driver_data VARCHAR(255),
    PRIMARY KEY (id)
);
-- NOTE(review): the LEFT OUTER JOIN emits one row per attachment; a
-- multiattach volume with several live attachment rows would insert
-- duplicate ids and violate the PRIMARY KEY -- confirm at most one
-- attachment per volume is guaranteed at this point.
INSERT INTO volumes_v39
    SELECT volumes.created_at,
           volumes.updated_at,
           volumes.deleted_at,
           volumes.deleted,
           volumes.id,
           volumes.ec2_id,
           volumes.user_id,
           volumes.project_id,
           volumes.snapshot_id,
           volumes.host,
           volumes.size,
           volumes.availability_zone,
           volumes.status,
           volumes.attach_status,
           volumes.scheduled_at,
           volumes.launched_at,
           volumes.terminated_at,
           volumes.display_name,
           volumes.display_description,
           volumes.provider_location,
           volumes.provider_auth,
           volumes.volume_type_id,
           volumes.source_volid,
           volumes.bootable,
           volumes.provider_geometry,
           volumes._name_id,
           volumes.encryption_key_id,
           volumes.migration_status,
           volume_attachment.attached_host,
           volume_attachment.attach_time,
           volume_attachment.instance_uuid,
           volume_attachment.mountpoint,
           volumes.consistencygroup_id,
           volumes.replication_status,
           volumes.replication_extended_status,
           volumes.replication_driver_data
    FROM volumes
    LEFT OUTER JOIN volume_attachment
        ON volumes.id=volume_attachment.volume_id;
DROP TABLE volumes;
ALTER TABLE volumes_v39 RENAME TO volumes;
DROP TABLE volume_attachment;
COMMIT;

@ -129,10 +129,6 @@ class Volume(BASE, CinderBase):
host = Column(String(255)) # , ForeignKey('hosts.id'))
size = Column(Integer)
availability_zone = Column(String(255)) # TODO(vish): foreign key?
instance_uuid = Column(String(36))
attached_host = Column(String(255))
mountpoint = Column(String(255))
attach_time = Column(String(255)) # TODO(vish): datetime
status = Column(String(255)) # TODO(vish): enum?
attach_status = Column(String(255)) # TODO(vish): enum
migration_status = Column(String(255))
@ -157,6 +153,7 @@ class Volume(BASE, CinderBase):
deleted = Column(Boolean, default=False)
bootable = Column(Boolean, default=False)
multiattach = Column(Boolean, default=False)
replication_status = Column(String(255))
replication_extended_status = Column(String(255))
@ -197,6 +194,26 @@ class VolumeAdminMetadata(BASE, CinderBase):
'VolumeAdminMetadata.deleted == False)')
class VolumeAttachment(BASE, CinderBase):
    """Represents a volume attachment for a vm."""
    __tablename__ = 'volume_attachment'
    # UUID primary key, generated by the DB layer on volume_attach().
    id = Column(String(36), primary_key=True)

    volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
    # Backref exposes Volume.volume_attachment as the list of this
    # volume's attachment rows with deleted == False.
    volume = relationship(Volume, backref="volume_attachment",
                          foreign_keys=volume_id,
                          primaryjoin='and_('
                          'VolumeAttachment.volume_id == Volume.id,'
                          'VolumeAttachment.deleted == False)')
    # Exactly one of instance_uuid / attached_host identifies the attachee.
    instance_uuid = Column(String(36))
    attached_host = Column(String(255))
    mountpoint = Column(String(255))
    attach_time = Column(DateTime)
    detach_time = Column(DateTime)
    # Lifecycle values seen in this change: 'attaching', 'attached',
    # 'detaching', 'detached'.
    attach_status = Column(String(255))
    # 'rw' or 'ro'.
    attach_mode = Column(String(255))
class VolumeTypes(BASE, CinderBase):
"""Represent possible volume_types of volumes offered."""
__tablename__ = "volume_types"
@ -576,6 +593,7 @@ def register_models():
Volume,
VolumeMetadata,
VolumeAdminMetadata,
VolumeAttachment,
SnapshotMetadata,
Transfer,
VolumeTypeExtraSpecs,

@ -252,6 +252,11 @@ class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeAttachmentNotFound(NotFound):
    # Raised by the DB API when no volume_attachment row matches the
    # supplied filter (e.g. an unknown attachment_id).
    message = _("Volume attachment could not be found with "
                "filter: %(filter)s .")
class VolumeMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no metadata with "
"key %(metadata_key)s.")

@ -45,10 +45,6 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
'host': fields.StringField(nullable=True),
'size': fields.IntegerField(),
'availability_zone': fields.StringField(),
'instance_uuid': fields.UUIDField(nullable=True),
'attached_host': fields.StringField(nullable=True),
'mountpoint': fields.StringField(nullable=True),
'attach_time': fields.StringField(nullable=True),
'status': fields.StringField(),
'attach_status': fields.StringField(),
'migration_status': fields.StringField(nullable=True),

@ -391,15 +391,14 @@ class AdminActionsTest(test.TestCase):
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
mountpoint = '/dev/vbd'
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
mountpoint, 'rw')
attachment = self.volume_api.attach(ctx, volume, stubs.FAKE_UUID,
None, mountpoint, 'rw')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEqual(volume['status'], 'in-use')
self.assertEqual(volume['instance_uuid'], stubs.FAKE_UUID)
self.assertIsNone(volume['attached_host'])
self.assertEqual(volume['mountpoint'], mountpoint)
self.assertEqual(volume['attach_status'], 'attached')
self.assertEqual(attachment['instance_uuid'], stubs.FAKE_UUID)
self.assertEqual(attachment['mountpoint'], mountpoint)
self.assertEqual(attachment['attach_status'], 'attached')
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
@ -415,7 +414,8 @@ class AdminActionsTest(test.TestCase):
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
req.body = jsonutils.dumps({'os-force_detach': None})
req.body = jsonutils.dumps({'os-force_detach':
{'attachment_id': attachment['id']}})
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
@ -423,12 +423,12 @@ class AdminActionsTest(test.TestCase):
# request is accepted
self.assertEqual(resp.status_int, 202)
volume = db.volume_get(ctx, volume['id'])
self.assertRaises(exception.VolumeAttachmentNotFound,
db.volume_attachment_get,
ctx, attachment['id'])
# status changed to 'available'
self.assertEqual(volume['status'], 'available')
self.assertIsNone(volume['instance_uuid'])
self.assertIsNone(volume['attached_host'])
self.assertIsNone(volume['mountpoint'])
self.assertEqual(volume['attach_status'], 'detached')
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
@ -445,17 +445,18 @@ class AdminActionsTest(test.TestCase):
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
host_name = 'fake-host'
self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'ro')
attachment = self.volume_api.attach(ctx, volume, None, host_name,
mountpoint, 'ro')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEqual(volume['status'], 'in-use')
self.assertIsNone(volume['instance_uuid'])
self.assertEqual(volume['attached_host'], host_name)
self.assertEqual(volume['mountpoint'], mountpoint)
self.assertEqual(volume['attach_status'], 'attached')
self.assertIsNone(attachment['instance_uuid'])
self.assertEqual(attachment['attached_host'], host_name)
self.assertEqual(attachment['mountpoint'], mountpoint)
self.assertEqual(attachment['attach_status'], 'attached')
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 2)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
@ -470,7 +471,8 @@ class AdminActionsTest(test.TestCase):
req.method = 'POST'
req.headers['content-type'] = 'application/json'
# request status of 'error'
req.body = jsonutils.dumps({'os-force_detach': None})
req.body = jsonutils.dumps({'os-force_detach':
{'attachment_id': attachment['id']}})
# attach admin context to request
req.environ['cinder.context'] = ctx
# make request
@ -478,12 +480,11 @@ class AdminActionsTest(test.TestCase):
# request is accepted
self.assertEqual(resp.status_int, 202)
volume = db.volume_get(ctx, volume['id'])
self.assertRaises(exception.VolumeAttachmentNotFound,
db.volume_attachment_get,
ctx, attachment['id'])
# status changed to 'available'
self.assertEqual(volume['status'], 'available')
self.assertIsNone(volume['instance_uuid'])
self.assertIsNone(volume['attached_host'])
self.assertIsNone(volume['mountpoint'])
self.assertEqual(volume['attach_status'], 'detached')
admin_metadata = volume['volume_admin_metadata']
self.assertEqual(len(admin_metadata), 1)
self.assertEqual(admin_metadata[0]['key'], 'readonly')
@ -502,11 +503,10 @@ class AdminActionsTest(test.TestCase):
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
mountpoint = '/dev/vbd'
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
mountpoint, 'rw')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
self.volume_api.attach(ctx, volume, fakes.get_fake_uuid(), None,
'/dev/vbd0', 'rw')
self.assertEqual(conn_info['data']['access_mode'], 'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
@ -514,15 +514,7 @@ class AdminActionsTest(test.TestCase):
volume,
fakes.get_fake_uuid(),
None,
mountpoint,
'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
None,
mountpoint,
'/dev/vdb1',
'ro')
# cleanup
svc.stop()
@ -538,9 +530,9 @@ class AdminActionsTest(test.TestCase):
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
mountpoint = '/dev/vbd'
host_name = 'fake_host'
self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'rw')
self.volume_api.initialize_connection(ctx, volume, connector)
self.volume_api.attach(ctx, volume, None, 'fake_host1',
'/dev/vbd0', 'rw')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
conn_info['data']['access_mode'] = 'rw'
@ -549,16 +541,8 @@ class AdminActionsTest(test.TestCase):
ctx,
volume,
None,
host_name,
mountpoint,
'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
None,
host_name,
mountpoint,
'fake_host2',
'/dev/vbd1',
'ro')
# cleanup
svc.stop()
@ -587,19 +571,23 @@ class AdminActionsTest(test.TestCase):
'provider_location': '', 'size': 1})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
values = {'status': 'attaching',
'instance_uuid': fakes.get_fake_uuid()}
db.volume_update(ctx, volume['id'], values)
self.volume_api.reserve_volume(ctx, volume)
values = {'volume_id': volume['id'],
'attach_status': 'attaching',
'attach_time': timeutils.utcnow(),
'instance_uuid': 'abc123',
}
db.volume_attach(ctx, values)
db.volume_admin_metadata_update(ctx, volume['id'],
{"attached_mode": 'rw'}, False)
mountpoint = '/dev/vbd'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
stubs.FAKE_UUID,
None,
mountpoint,
'rw')
# cleanup
attachment = self.volume_api.attach(ctx, volume,
stubs.FAKE_UUID, None,
mountpoint, 'rw')
self.assertEqual(stubs.FAKE_UUID, attachment['instance_uuid'])
self.assertEqual(volume['id'], attachment['volume_id'], volume['id'])
self.assertEqual('attached', attachment['attach_status'])
svc.stop()
def test_attach_attaching_volume_with_different_mode(self):

@@ -37,7 +37,7 @@ CONF = cfg.CONF
class VolumeActionsTest(test.TestCase):
_actions = ('os-detach', 'os-reserve', 'os-unreserve')
_actions = ('os-reserve', 'os-unreserve')
_methods = ('attach', 'detach', 'reserve_volume', 'unreserve_volume')
@@ -179,6 +179,16 @@ class VolumeActionsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_detach(self):
    """POST os-detach with an explicit attachment_id; expect HTTP 202.

    Exercises the multi-attach detach path added by this change: the
    caller names which attachment to remove via 'attachment_id'
    (here a placeholder 'fakeuuid' — the stubbed volume_api accepts it).
    """
    # Request body carries the attachment identifier for the detach.
    body = {'os-detach': {'attachment_id': 'fakeuuid'}}
    req = webob.Request.blank('/v2/fake/volumes/1/action')
    req.method = "POST"
    req.body = jsonutils.dumps(body)
    req.headers["content-type"] = "application/json"
    res = req.get_response(fakes.wsgi_app())
    # Volume actions are asynchronous: success is 202 Accepted.
    self.assertEqual(202, res.status_int)
def test_attach_with_invalid_arguments(self):
# Invalid request to attach volume an invalid target
body = {'os-attach': {'mountpoint': '/dev/vdc'}}

@@ -29,9 +29,6 @@ def stub_volume(id, **kwargs):
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'attached_host': None,
'mountpoint': '/',
'attached_mode': 'rw',
'status': 'fakestatus',
'migration_status': None,
@@ -46,6 +43,8 @@ def stub_volume(id, **kwargs):
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'},
'volume_attachment': [],
'multiattach': False,
'readonly': 'False'}
volume.update(kwargs)

@@ -85,11 +85,8 @@ class VolumeApiTest(test.TestCase):
'availability_zone': 'zone1:host1',
'display_name': 'Volume Test Name',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
@@ -176,11 +173,8 @@ class VolumeApiTest(test.TestCase):
'availability_zone': 'nova',
'display_name': 'Volume Test Name',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'image_id': test_id,
@@ -258,13 +252,8 @@ class VolumeApiTest(test.TestCase):
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'encrypted': False,
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/'
}],
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
@@ -294,13 +283,8 @@ class VolumeApiTest(test.TestCase):
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/'
}],
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
@@ -328,6 +312,10 @@ class VolumeApiTest(test.TestCase):
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
updates = {
"display_name": "Updated Test Name",
@@ -339,18 +327,20 @@ class VolumeApiTest(test.TestCase):
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'encrypted': False,
'attachments': [{
'attachment_id': attachment['id'],
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'device': '/'
}],
'multiattach': 'false',
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
@@ -400,11 +390,8 @@ class VolumeApiTest(test.TestCase):
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
@@ -430,21 +417,28 @@ class VolumeApiTest(test.TestCase):
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v1/volumes')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
expected = {'volumes': [{'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'attachments': [
{'attachment_id': attachment['id'],
'device': '/',
'server_id': stubs.FAKE_UUID,
'host_name': None,
'id': '1',
'volume_id': '1'}],
'multiattach': 'false',
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
@@ -469,11 +463,8 @@ class VolumeApiTest(test.TestCase):
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'attachments': [],
'multiattach': 'false',
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
@@ -499,21 +490,28 @@ class VolumeApiTest(test.TestCase):
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
values = {'volume_id': '1', }
attachment = db.volume_attach(context.get_admin_context(), values)
db.volume_attached(context.get_admin_context(),
attachment['id'], stubs.FAKE_UUID, None, '/')
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
expected = {'volumes': [{'status': 'in-use',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'<