Enable multiattach capability

This patch adds the ability to create multiple attachments
for a single volume.  This change requires that a volume
either be created with a special volume-type that includes
extra-specs indicating the requirement for multiattach
capabilities from the backend:
    `extra_specs: {'multiattach': '<is> True'}`

or that an available volume is retyped to the specified
multiattach type.
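
For illustration only, a rough python-cinderclient sketch of the
create path (the auth URL, credentials and type name below are
placeholders, not part of this change):

    from cinderclient import client
    from keystoneauth1 import loading
    from keystoneauth1 import session

    # Placeholder credentials/endpoint; any authenticated session works.
    loader = loading.get_plugin_loader('password')
    auth = loader.load_from_options(auth_url='http://controller:5000/v3',
                                    username='admin',
                                    password='secret',
                                    project_name='admin',
                                    user_domain_id='default',
                                    project_domain_id='default')
    cinder = client.Client('3.50', session=session.Session(auth=auth))

    # A volume-type that advertises the multiattach capability ...
    mtype = cinder.volume_types.create('multiattach')
    mtype.set_keys({'multiattach': '<is> True'})

    # ... and a volume of that type, eligible for multiple attachments.
    vol = cinder.volumes.create(1, name='shared-vol',
                                volume_type='multiattach')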

What the patch does is pretty simple:
1.  On volume create
    Inspect the associated volume-type to see if it includes
    the multiattach capability (see the extra-spec check
    sketched after this list); if it does and
    scheduling/creation is successful, then we set the
    multiattach attribute on the volume to True.

2.  On volume retype
    We verify that a retype involving a change in multiattach
    capability is allowed, and check policy.

    After retype, we again set/update the multiattach
    attribute of the volume to match the newly specified type.
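
Both paths rely on a small helper (self._is_multiattach() in the
volume API changes below) whose body is not visible in the hunks
shown here.  A minimal sketch of what such an extra-spec check looks
like, assumed rather than verbatim:

    def _is_multiattach(volume_type):
        # The capability is expressed with the scheduler boolean
        # syntax in the type's extra specs, e.g. '<is> True'.
        specs = getattr(volume_type, 'extra_specs', None) or {}
        return specs.get('multiattach') == '<is> True'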

Note that we do NOT do anything to enforce that the volume is
formatted with a shared filesystem!  Attempting to use this feature
with a standard ext4 filesystem will lead to one or more of your
attachments erroring out and being converted to read-only mode.

blueprint: bp/multi-attach-v3-attach

Change-Id: Ic8a8ba2271d6ed672b694d3991dabd46bd9a69f4
Author: John Griffith  2018-01-09 00:10:32 +00:00
parent f38e77cb18
commit f1bfd9790d
7 changed files with 176 additions and 11 deletions


@@ -137,6 +137,8 @@ VOLUME_SHARED_TARGETS_AND_SERVICE_FIELDS = '3.48'
BACKEND_STATE_REPORT = '3.49'
MULTIATTACH_VOLUMES = '3.50'
def get_mv_header(version):
"""Gets a formatted HTTP microversion header.

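The hunk above truncates the get_mv_header() docstring.  Presumably the
helper just wraps the requested microversion in the standard
OpenStack-API-Version header, along these lines (assumed, not the
verbatim test helper):

    def get_mv_header(version):
        """Gets a formatted HTTP microversion header."""
        # e.g. get_mv_header(MULTIATTACH_VOLUMES) ->
        #      {'OpenStack-API-Version': 'volume 3.50'}
        return {'OpenStack-API-Version': 'volume %s' % version}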

@@ -113,6 +113,7 @@ REST_API_VERSION_HISTORY = """
* 3.47 - Support create volume from backup.
* 3.48 - Add ``shared_targets`` and ``service_uuid`` fields to volume.
* 3.49 - Support report backend storage state in service list.
* 3.50 - Add multiattach capability
"""
# The minimum and maximum versions of the API supported
@@ -120,7 +121,7 @@ REST_API_VERSION_HISTORY = """
# minimum version of the API supported.
# Explicitly using /v2 endpoints will still work
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.49"
_MAX_API_VERSION = "3.50"
_LEGACY_API_VERSION2 = "2.0"
UPDATED = "2017-09-19T20:18:14Z"


@@ -626,6 +626,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
'description',
volume_type=foo)
self.assertEqual(foo['id'], vol['volume_type_id'])
self.assertTrue(vol['multiattach'])
@mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
def test_create_volume_with_encrypted_volume_type_aes(self):


@@ -0,0 +1,108 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Volume retype Code."""
import mock
from oslo_config import cfg
from cinder import context
from cinder import exception
from cinder import objects
from cinder import quota
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import volume as base
from cinder.volume import volume_types
QUOTAS = quota.QUOTAS
CONF = cfg.CONF
class VolumeRetypeTestCase(base.BaseVolumeTestCase):
def setUp(self):
super(VolumeRetypeTestCase, self).setUp()
self.patch('cinder.volume.utils.clear_volume', autospec=True)
self.expected_status = 'available'
self.service_id = 1
self.user_context = context.RequestContext(user_id=fake.USER_ID,
project_id=fake.PROJECT_ID)
volume_types.create(self.context,
"old-type",
{},
description="test-multiattach")
volume_types.create(self.context,
"fake_vol_type",
{},
description="fake_type")
volume_types.create(self.context,
"multiattach-type",
{'multiattach': "<is> True"},
description="test-multiattach")
self.default_vol_type = objects.VolumeType.get_by_name_or_id(
self.context,
'fake_vol_type')
self.multiattach_type = objects.VolumeType.get_by_name_or_id(
self.context,
'multiattach-type')
def fake_get_vtype(self, context, identifier):
if identifier == "multiattach-type":
return self.multiattach_type
else:
return self.default_vol_type
@mock.patch.object(volume_types, 'get_by_name_or_id')
def test_retype_multiattach(self, _mock_get_types):
"""Verify multiattach retype restrictions."""
_mock_get_types.side_effect = self.fake_get_vtype
# Test going from default type to multiattach
vol = self.volume_api.create(self.context,
1,
'test-vol',
'')
vol.update({'status': 'available'})
vol.save()
self.volume_api.retype(self.user_context,
vol,
'multiattach-type')
vol = objects.Volume.get_by_id(self.context, vol.id)
self.assertTrue(vol.multiattach)
# Test going from multiattach to a non-multiattach type
vol = self.volume_api.create(
self.context,
1,
'test-multiattachvol',
'',
volume_type=self.multiattach_type)
vol.update({'status': 'available'})
vol.save()
self.volume_api.retype(self.user_context,
vol,
'fake_vol-type')
vol = objects.Volume.get_by_id(self.context, vol.id)
self.assertFalse(vol.multiattach)
# Test trying to retype an in-use volume
vol.update({'status': 'in-use'})
vol.save()
self.assertRaises(exception.InvalidInput,
self.volume_api.retype,
self.context,
vol,
'multiattach-type')


@@ -349,6 +349,8 @@ class API(base.Base):
# Refresh the object here, otherwise things ain't right
vref = objects.Volume.get_by_id(
context, vref['id'])
vref.multiattach = self._is_multiattach(volume_type)
vref.save()
LOG.info("Create volume request issued successfully.",
resource=vref)
return vref
@@ -1620,14 +1622,14 @@
# Support specifying volume type by ID or name
try:
vol_type = (
new_type = (
volume_types.get_by_name_or_id(context.elevated(), new_type))
except exception.InvalidVolumeType:
msg = _('Invalid volume_type passed: %s.') % new_type
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vol_type_id = vol_type['id']
new_type_id = new_type['id']
# NOTE(jdg): We check here if multiattach is involved in either side
# of the retype, we can't change multiattach on an in-use volume
@@ -1636,12 +1638,11 @@
# have to get through scheduling if all the conditions are met, we
# should consider an up front capabilities check to give fast feedback
# rather than "No hosts found" and error status
src_is_multiattach = volume.multiattach
tgt_is_multiattach = False
if (vol_type and
self._is_multiattach(vol_type)):
tgt_is_multiattach = True
if new_type:
tgt_is_multiattach = self._is_multiattach(new_type)
if src_is_multiattach != tgt_is_multiattach:
if volume.status != "available":
@@ -1657,7 +1658,7 @@
# early as possible, but won't commit until we change the type. We
# pass the reservations onward in case we need to roll back.
reservations = quota_utils.get_volume_type_reservation(
context, volume, vol_type_id, reserve_vol_type_only=True)
context, volume, new_type_id, reserve_vol_type_only=True)
# Get old reservations
try:
@@ -1685,12 +1686,12 @@
'migration_status': self.AVAILABLE_MIGRATION_STATUS,
'consistencygroup_id': (None, ''),
'group_id': (None, ''),
'volume_type_id': db.Not(vol_type_id)}
'volume_type_id': db.Not(new_type_id)}
# We don't support changing QoS at the front-end yet for in-use volumes
# TODO(avishay): Call Nova to change QoS setting (libvirt has support
# - virDomainSetBlockIoTune() - Nova does not have support yet).
filters = [db.volume_qos_allows_retype(vol_type_id)]
filters = [db.volume_qos_allows_retype(new_type_id)]
updates = {'status': 'retyping',
'previous_status': objects.Volume.model.status}
@@ -1708,7 +1709,7 @@
request_spec = {'volume_properties': volume,
'volume_id': volume.id,
'volume_type': vol_type,
'volume_type': new_type,
'migration_policy': migration_policy,
'quota_reservations': reservations,
'old_reservations': old_reservations}
@@ -1716,6 +1717,8 @@
self.scheduler_rpcapi.retype(context, volume,
request_spec=request_spec,
filter_properties={})
volume.multiattach = tgt_is_multiattach
volume.save()
LOG.info("Retype volume request issued successfully.",
resource=volume)

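The api.py hunk above is cut off right after the
src_is_multiattach != tgt_is_multiattach and volume.status checks, so
the error path itself is not visible.  A self-contained sketch of the
restriction those checks imply (assumed semantics, not the verbatim
Cinder code):

    class InvalidInput(Exception):
        """Stand-in for cinder.exception.InvalidInput."""

    def check_multiattach_retype(src_is_multiattach, tgt_is_multiattach,
                                 status):
        # Flipping the multiattach flag is only allowed while the volume
        # is 'available'; attached consumers can't be updated in place.
        if src_is_multiattach != tgt_is_multiattach and status != 'available':
            raise InvalidInput('retypes affecting multiattach are only '
                               'allowed on available volumes, current '
                               'status: %s' % status)

    # Rejected: the volume is attached and the retype would flip the flag.
    try:
        check_multiattach_retype(False, True, 'in-use')
    except InvalidInput as err:
        print(err)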

@@ -0,0 +1,49 @@
.. _volume_multiattach:

=============================================
Enable attaching a volume to multiple servers
=============================================

When configured to allow it, and for backends that support it, Cinder
allows a volume to be attached to more than one host/server at a time.

By default this feature is only enabled for administrators and is
controlled by policy. If the user is not an admin, or the policy file
isn't modified, only a single attachment per volume is allowed.

In addition, the ability to attach a volume to multiple hosts/servers
requires that the volume is of a special type that includes an extra-spec
capability setting of ``multiattach: True``:

.. code-block:: console

   $ cinder type-create multiattach
   $ cinder type-key multiattach set multiattach="<is> True"

Now any volume of this type is capable of having multiple simultaneous
attachments. You'll need to ensure you have a backend device that reports
support of the multiattach capability, otherwise scheduling will fail on
create.

At this point Cinder will no longer check in-use status when creating or
updating attachments.

.. note::

   This feature is only supported when using the new attachment APIs
   (attachment-create, attachment-update, etc.).

In addition, it's possible to retype a volume to be multiattach capable.
Currently, however, we do NOT allow retyping a volume to multiattach:True
or multiattach:False if its status is not ``available``. This is because
some consumers/hypervisors need to make special considerations at
attach-time for multiattach volumes (i.e. disable caching) and there's
currently no mechanism to go back to ``in-use`` volumes and update them.
While going from ``multiattach:True`` --> ``multiattach:False`` isn't as
problematic, it is error prone when it comes to special cases like shelve,
migrate, etc. The bottom line is that it's *safer* to just avoid changing
this setting on ``in-use`` volumes.

Finally, note that neither Cinder nor its backends do anything in terms of
file systems or control of the volumes. In other words, it's up to the user
to ensure that a multiattach-capable or clustered file system is used on
the volumes. Otherwise there is a high probability of data corruption.
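
As a rough illustration only (the client handle ``cinder``, the volume and
the type name below are hypothetical, built with python-cinderclient
against microversion 3.50), the retype path described above might be
exercised like this:

.. code-block:: python

   # Hedged sketch: retype an *available* volume to a multiattach-capable
   # type. `cinder` is assumed to be a python-cinderclient Client created
   # with version '3.50' and an authenticated session.
   vol = cinder.volumes.create(1, name='retype-me')

   # migration_policy 'never' means the retype must succeed in place.
   cinder.volumes.retype(vol, 'multiattach', 'never')

   # Retyping while the volume is 'in-use' is rejected; detach first or
   # wait until the volume is 'available' before changing the flag.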


@@ -44,6 +44,7 @@ Amazon EC2 Elastic Block Storage (EBS) offering.
blockstorage-volume-backups-export-import.rst
blockstorage-volume-backups.rst
blockstorage-volume-migration.rst
blockstorage-volume-multiattach.rst
blockstorage-volume-number-weigher.rst
blockstorage-report-backend-state.rst