Add Read-Only volume attaching support to Cinder

1. Add an API extension that allows clients to set a volume's Read-Only
flag on demand.
2. Require clients to provide, and be aware of, the volume attaching
mode when they call the 'os-attach' API (example request bodies follow
this list).
3. Add an 'access_mode' field to the connection info returned by the
'os-initialize_connection' API. Clients such as Nova should use this
field to access the attached volume with the correct mode. Currently
the access mode can be 'rw' or 'ro'.
4. In the future, drivers within Cinder need to ensure the volume is
exposed under the access mode described by the connection info; for
example, a backend should set the volume to read-only mode when the
connection info asks the client to consume the attached volume in 'ro'
mode. That means Read-Only is not only an attaching mode but also a
state of the volume.
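
For reference, a minimal sketch of the request bodies the changed and new
volume actions accept (field names are taken from this change; the endpoint
path and the values shown are illustrative):

    # POST /v2/{project_id}/volumes/{volume_id}/action
    # Attach with an explicit access mode; 'mode' defaults to 'rw' when
    # omitted and must be 'rw' or 'ro'.
    attach_body = {'os-attach': {'instance_uuid': '<instance-uuid>',
                                 'mountpoint': '/dev/vdc',
                                 'mode': 'ro'}}

    # Set or clear the volume Read-Only flag; 'readonly' must be a boolean.
    readonly_body = {'os-update_readonly_flag': {'readonly': True}}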

blueprint read-only-volumes

Change-Id: I4c84614d6541d5f7c358abadb957da7b8c3d9c48
Signed-off-by: Zhi Yan Liu <zhiyanl@cn.ibm.com>
Author: Zhi Yan Liu
Date: 2013-07-20 06:18:24 +08:00
parent 4daffc63e7
commit 900851ffe3
27 changed files with 990 additions and 140 deletions

View File

@@ -85,6 +85,10 @@ class VolumeActionsController(wsgi.Controller):
if 'host_name' in body['os-attach']:
host_name = body['os-attach']['host_name']
mountpoint = body['os-attach']['mountpoint']
if 'mode' in body['os-attach']:
mode = body['os-attach']['mode']
else:
mode = 'rw'
if instance_uuid and host_name:
msg = _("Invalid request to attach volume to an "
@@ -98,8 +102,13 @@ class VolumeActionsController(wsgi.Controller):
msg = _("Invalid request to attach volume to an invalid target")
raise webob.exc.HTTPBadRequest(explanation=msg)
if mode not in ('rw', 'ro'):
msg = _("Invalid request to attach volume with an invalid mode. "
"Attaching mode should be 'rw' or 'ro'")
raise webob.exc.HTTPBadRequest(explanation=msg)
self.volume_api.attach(context, volume,
instance_uuid, host_name, mountpoint)
instance_uuid, host_name, mountpoint, mode)
return webob.Response(status_int=202)
@wsgi.action('os-detach')
@@ -210,8 +219,8 @@ class VolumeActionsController(wsgi.Controller):
context = req.environ['cinder.context']
volume = self.volume_api.get(context, id)
try:
val = int(body['os-extend']['new_size'])
except ValueError:
_val = int(body['os-extend']['new_size'])
except (KeyError, ValueError):
msg = _("New volume size must be specified as an integer.")
raise webob.exc.HTTPBadRequest(explanation=msg)
@@ -219,6 +228,27 @@ class VolumeActionsController(wsgi.Controller):
self.volume_api.extend(context, volume, size)
return webob.Response(status_int=202)
@wsgi.action('os-update_readonly_flag')
def _volume_readonly_update(self, req, id, body):
"""Update volume readonly flag."""
context = req.environ['cinder.context']
volume = self.volume_api.get(context, id)
if not self.is_valid_body(body, 'os-update_readonly_flag'):
msg = _("No 'os-update_readonly_flag' was specified "
"in request.")
raise webob.exc.HTTPBadRequest(explanation=msg)
readonly_flag = body['os-update_readonly_flag'].get('readonly')
if not isinstance(readonly_flag, bool):
msg = _("Volume 'readonly' flag must be specified "
"in request as a boolean.")
raise webob.exc.HTTPBadRequest(explanation=msg)
self.volume_api.update_readonly_flag(context, volume, readonly_flag)
return webob.Response(status_int=202)
class Volume_actions(extensions.ExtensionDescriptor):
"""Enable volume actions

View File

@@ -209,11 +209,52 @@ class CreateDeserializer(CommonDeserializer):
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def __init__(self, ext_mgr):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
def _add_visible_admin_metadata(self, context, volume):
if context is None:
return
visible_admin_meta = {}
volume_tmp = (volume if context.is_admin else
self.volume_api.get(context.elevated(), volume['id']))
if volume_tmp.get('volume_admin_metadata'):
for item in volume_tmp['volume_admin_metadata']:
if item['key'] in self._visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume_tmp.get('admin_metadata') and
isinstance(volume_tmp.get('admin_metadata'), dict)):
for key in self._visible_admin_metadata_keys:
if key in volume_tmp['admin_metadata'].keys():
visible_admin_meta[key] = volume_tmp['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): merge visible administration metadata into
# volume metadata; administration metadata overwrites existing keys.
if volume.get('volume_metadata'):
orig_meta = volume.get('volume_metadata')
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.iteritems():
orig_meta.append({'key': key, 'value': value})
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
@@ -224,6 +265,8 @@ class VolumeController(wsgi.Controller):
except exception.NotFound:
raise exc.HTTPNotFound()
self._add_visible_admin_metadata(context, vol)
return {'volume': _translate_volume_detail_view(context, vol)}
def delete(self, req, id):
@@ -267,6 +310,10 @@ class VolumeController(wsgi.Controller):
volumes = self.volume_api.get_all(context, marker=None, limit=None,
sort_key='created_at',
sort_dir='desc', filters=search_opts)
for volume in volumes:
self._add_visible_admin_metadata(context, volume)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@@ -361,9 +408,11 @@ class VolumeController(wsgi.Controller):
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context,
dict(new_volume.iteritems()),
image_uuid)
new_volume = dict(new_volume.iteritems())
self._add_visible_admin_metadata(context, new_volume)
retval = _translate_volume_detail_view(context, new_volume, image_uuid)
return {'volume': retval}
@@ -403,6 +452,8 @@ class VolumeController(wsgi.Controller):
volume.update(update_dict)
self._add_visible_admin_metadata(context, volume)
return {'volume': _translate_volume_detail_view(context, volume)}
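
A small illustration of the effect of _add_visible_admin_metadata() on a
volume dict (the same helper is added to both the v1 and v2 controllers;
the sample values are illustrative):

    # Volume as seen by an admin context before the call:
    volume = {'id': 'vol-1',
              'metadata': {'purpose': 'test'},
              'admin_metadata': {'readonly': 'False', 'attached_mode': 'rw'}}
    # After _add_visible_admin_metadata(context, volume), the visible admin
    # keys ('readonly', 'attached_mode') are merged into the user-facing
    # metadata and overwrite any duplicates:
    # volume['metadata'] == {'purpose': 'test',
    #                        'readonly': 'False',
    #                        'attached_mode': 'rw'}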

View File

@@ -152,11 +152,52 @@ class VolumeController(wsgi.Controller):
_view_builder_class = volume_views.ViewBuilder
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def __init__(self, ext_mgr):
self.volume_api = volume.API()
self.ext_mgr = ext_mgr
super(VolumeController, self).__init__()
def _add_visible_admin_metadata(self, context, volume):
if context is None:
return
visible_admin_meta = {}
volume_tmp = (volume if context.is_admin else
self.volume_api.get(context.elevated(), volume['id']))
if volume_tmp.get('volume_admin_metadata'):
for item in volume_tmp['volume_admin_metadata']:
if item['key'] in self._visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume_tmp.get('admin_metadata') and
isinstance(volume_tmp.get('admin_metadata'), dict)):
for key in self._visible_admin_metadata_keys:
if key in volume_tmp['admin_metadata'].keys():
visible_admin_meta[key] = volume_tmp['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): merge visible administration metadata into
# volume metadata; administration metadata overwrites existing keys.
if volume.get('volume_metadata'):
orig_meta = volume.get('volume_metadata')
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.iteritems():
orig_meta.append({'key': key, 'value': value})
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
@wsgi.serializers(xml=VolumeTemplate)
def show(self, req, id):
"""Return data about the given volume."""
@@ -168,6 +209,8 @@ class VolumeController(wsgi.Controller):
msg = _("Volume could not be found")
raise exc.HTTPNotFound(explanation=msg)
self._add_visible_admin_metadata(context, vol)
return self._view_builder.detail(req, vol)
def delete(self, req, id):
@@ -223,6 +266,10 @@ class VolumeController(wsgi.Controller):
volumes = self.volume_api.get_all(context, marker, limit, sort_key,
sort_dir, filters)
for volume in volumes:
self._add_visible_admin_metadata(context, volume)
limited_list = common.limited(volumes, req)
if is_detail:
@@ -324,7 +371,11 @@ class VolumeController(wsgi.Controller):
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = self._view_builder.summary(req, dict(new_volume.iteritems()))
new_volume = dict(new_volume.iteritems())
self._add_visible_admin_metadata(context, new_volume)
retval = self._view_builder.summary(req, new_volume)
return retval
@@ -377,6 +428,8 @@ class VolumeController(wsgi.Controller):
volume.update(update_dict)
self._add_visible_admin_metadata(context, volume)
return self._view_builder.detail(req, volume)

View File

@@ -345,6 +345,24 @@ def volume_metadata_update(context, volume_id, metadata, delete):
##################
def volume_admin_metadata_get(context, volume_id):
"""Get all administration metadata for a volume."""
return IMPL.volume_admin_metadata_get(context, volume_id)
def volume_admin_metadata_delete(context, volume_id, key):
"""Delete the given metadata item."""
IMPL.volume_admin_metadata_delete(context, volume_id, key)
def volume_admin_metadata_update(context, volume_id, metadata, delete):
"""Update metadata if it exists, otherwise create it."""
IMPL.volume_admin_metadata_update(context, volume_id, metadata, delete)
##################
def volume_type_create(context, values):
"""Create a new volume type."""
return IMPL.volume_type_create(context, values)
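
A short sketch of how the new administration-metadata DB API is intended to
be used (volume_id is assumed to be an existing volume's UUID; only an admin
context passes the require_admin_context checks in the SQLAlchemy backend):

    from cinder import context
    from cinder import db

    admin_ctxt = context.get_admin_context()
    # Create or update keys; delete=False leaves keys not named here intact.
    db.volume_admin_metadata_update(admin_ctxt, volume_id,
                                    {'readonly': 'True'}, False)
    # Read everything back as a plain dict, e.g. {'readonly': 'True'}.
    admin_meta = db.volume_admin_metadata_get(admin_ctxt, volume_id)
    # Remove a single key.
    db.volume_admin_metadata_delete(admin_ctxt, volume_id, 'readonly')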

View File

@@ -20,7 +20,6 @@
"""Implementation of SQLAlchemy backend."""
import datetime
import sys
import uuid
import warnings
@@ -1043,6 +1042,11 @@ def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint):
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
if is_admin_context(context):
values['volume_admin_metadata'] = \
_metadata_refs(values.get('admin_metadata'),
models.VolumeAdminMetadata)
volume_ref = models.Volume()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
@@ -1121,12 +1125,13 @@ def finish_volume_migration(context, src_vol_id, dest_vol_id):
@require_admin_context
def volume_destroy(context, volume_id):
session = get_session()
now = timeutils.utcnow()
with session.begin():
session.query(models.Volume).\
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'deleted_at': now,
'updated_at': literal_column('updated_at')})
session.query(models.IscsiTarget).\
filter_by(volume_id=volume_id).\
@@ -1134,7 +1139,12 @@ def volume_destroy(context, volume_id):
session.query(models.VolumeMetadata).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'deleted_at': now,
'updated_at': literal_column('updated_at')})
session.query(models.VolumeAdminMetadata).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
@@ -1156,10 +1166,17 @@ def volume_detached(context, volume_id):
@require_context
def _volume_get_query(context, session=None, project_only=False):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type'))
if is_admin_context(context):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type'))
@require_context
@@ -1206,6 +1223,7 @@ def volume_get_all_by_host(context, host):
def volume_get_all_by_instance_uuid(context, instance_uuid):
result = model_query(context, models.Volume, read_deleted="no").\
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type')).\
filter_by(instance_uuid=instance_uuid).\
all()
@@ -1255,11 +1273,19 @@ def volume_update(context, volume_id, values):
with session.begin():
metadata = values.get('metadata')
if metadata is not None:
_volume_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
_volume_user_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
admin_metadata = values.get('admin_metadata')
if is_admin_context(context) and admin_metadata is not None:
_volume_admin_metadata_update(context,
volume_id,
values.pop('admin_metadata'),
delete=True,
session=session)
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref.update(values)
@@ -1269,16 +1295,14 @@ def volume_update(context, volume_id, values):
####################
def _volume_metadata_get_query(context, volume_id, session=None):
return model_query(context, models.VolumeMetadata,
session=session, read_deleted="no").\
def _volume_x_metadata_get_query(context, volume_id, model, session=None):
return model_query(context, model, session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
@require_context
@require_volume_exists
def _volume_metadata_get(context, volume_id, session=None):
rows = _volume_metadata_get_query(context, volume_id, session).all()
def _volume_x_metadata_get(context, volume_id, model, session=None):
rows = _volume_x_metadata_get_query(context, volume_id, model,
session=session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
@@ -1286,56 +1310,34 @@ def _volume_metadata_get(context, volume_id, session=None):
return result
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
return _volume_metadata_get(context, volume_id)
@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
_volume_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _volume_metadata_get_item(context, volume_id, key, session=None):
result = _volume_metadata_get_query(context, volume_id, session=session).\
def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec,
session=None):
result = _volume_x_metadata_get_query(context, volume_id,
model, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeMetadataNotFound(metadata_key=key,
volume_id=volume_id)
raise notfound_exec(metadata_key=key, volume_id=volume_id)
return result
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key):
return _volume_metadata_get_item(context, volume_id, key)
@require_context
@require_volume_exists
def _volume_metadata_update(context, volume_id, metadata, delete,
session=None):
def _volume_x_metadata_update(context, volume_id, metadata, delete,
model, notfound_exec, session=None):
if not session:
session = get_session()
with session.begin(subtransactions=True):
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _volume_metadata_get(context, volume_id,
session)
original_metadata = _volume_x_metadata_get(context, volume_id,
model, session=session)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = _volume_metadata_get_item(context, volume_id,
meta_key, session)
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
notfound_exec,
session=session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
@@ -1349,10 +1351,12 @@ def _volume_metadata_update(context, volume_id, metadata, delete,
item = {"value": meta_value}
try:
meta_ref = _volume_metadata_get_item(context, volume_id,
meta_key, session)
except exception.VolumeMetadataNotFound as e:
meta_ref = models.VolumeMetadata()
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
notfound_exec,
session=session)
except notfound_exec:
meta_ref = model()
item.update({"key": meta_key, "volume_id": volume_id})
meta_ref.update(item)
@@ -1361,10 +1365,110 @@ def _volume_metadata_update(context, volume_id, metadata, delete,
return metadata
def _volume_user_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
def _volume_user_metadata_get_item(context, volume_id, key, session=None):
return _volume_x_metadata_get_item(context, volume_id, key,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key):
return _volume_user_metadata_get_item(context, volume_id, key)
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
return _volume_user_metadata_get(context, volume_id)
@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
_volume_user_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
return _volume_metadata_update(context, volume_id, metadata, delete)
return _volume_user_metadata_update(context, volume_id, metadata, delete)
###################
def _volume_admin_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeAdminMetadata,
session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeAdminMetadata, session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeAdminMetadata,
exception.VolumeAdminMetadataNotFound,
session=session)
@require_admin_context
@require_volume_exists
def volume_admin_metadata_get(context, volume_id):
return _volume_admin_metadata_get(context, volume_id)
@require_admin_context
@require_volume_exists
def volume_admin_metadata_delete(context, volume_id, key):
_volume_admin_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
@require_volume_exists
def volume_admin_metadata_update(context, volume_id, metadata, delete):
return _volume_admin_metadata_update(context, volume_id, metadata, delete)
###################

View File

@@ -1,6 +1,6 @@
BEGIN TRANSACTION;
CREATE TABLE volumes_v12 (
CREATE TABLE volumes_v13 (
created_at DATETIME,
updated_at DATETIME,
deleted_at DATETIME,
@@ -29,10 +29,11 @@ CREATE TABLE volumes_v12 (
volume_type_id VARCHAR(36),
source_volid VARCHAR(36),
bootable BOOLEAN,
provider_geometry VARCHAR(255),
PRIMARY KEY (id)
);
INSERT INTO volumes_v12
INSERT INTO volumes_v13
SELECT created_at,
updated_at,
deleted_at,
@@ -60,9 +61,10 @@ INSERT INTO volumes_v12
provider_auth,
volume_type_id,
source_volid,
bootable
bootable,
provider_geometry
FROM volumes;
DROP TABLE volumes;
ALTER TABLE volumes_v12 RENAME TO volumes;
ALTER TABLE volumes_v13 RENAME TO volumes;
COMMIT;

View File

@@ -0,0 +1,60 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import Integer, MetaData, String, Table, ForeignKey
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
_volumes = Table('volumes', meta, autoload=True)
# New table
volume_admin_metadata = Table(
'volume_admin_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('id', Integer, primary_key=True, nullable=False),
Column('volume_id', String(length=36), ForeignKey('volumes.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
mysql_engine='InnoDB'
)
try:
volume_admin_metadata.create()
except Exception:
LOG.error(_("Table |%s| not created!"), repr(volume_admin_metadata))
raise
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
volume_admin_metadata = Table('volume_admin_metadata',
meta,
autoload=True)
try:
volume_admin_metadata.drop()
except Exception:
LOG.error(_("volume_admin_metadata table not dropped"))

View File

@@ -146,6 +146,20 @@ class VolumeMetadata(BASE, CinderBase):
'VolumeMetadata.deleted == False)')
class VolumeAdminMetadata(BASE, CinderBase):
"""Represents a administrator metadata key/value pair for a volume."""
__tablename__ = 'volume_admin_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
value = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'), nullable=False)
volume = relationship(Volume, backref="volume_admin_metadata",
foreign_keys=volume_id,
primaryjoin='and_('
'VolumeAdminMetadata.volume_id == Volume.id,'
'VolumeAdminMetadata.deleted == False)')
class VolumeTypes(BASE, CinderBase):
"""Represent possible volume_types of volumes offered."""
__tablename__ = "volume_types"
@@ -477,6 +491,7 @@ def register_models():
Service,
Volume,
VolumeMetadata,
VolumeAdminMetadata,
SnapshotMetadata,
Transfer,
VolumeTypeExtraSpecs,

View File

@@ -139,6 +139,11 @@ class InvalidSourceVolume(Invalid):
message = _("Invalid source volume %(reason)s.")
class InvalidVolumeAttachMode(Invalid):
message = _("Invalid attaching mode '%(mode)s' for "
"volume %(volume_id)s.")
class VolumeAttached(Invalid):
message = _("Volume %(volume_id)s is still attached, detach volume first.")
@@ -229,6 +234,11 @@ class VolumeMetadataNotFound(NotFound):
"key %(metadata_key)s.")
class VolumeAdminMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no administration metadata with "
"key %(metadata_key)s.")
class InvalidVolumeMetadata(Invalid):
message = _("Invalid metadata") + ": %(reason)s"

View File

@@ -294,9 +294,9 @@ class AdminActionsTest(test.TestCase):
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, mountpoint)
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
mountpoint, 'rw')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEquals(volume['status'], 'in-use')
@@ -304,6 +304,15 @@ class AdminActionsTest(test.TestCase):
self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], mountpoint)
self.assertEquals(volume['attach_status'], 'attached')
admin_metadata = volume['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'False')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'rw')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
self.assertEquals(conn_info['data']['access_mode'], 'rw')
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
@@ -320,8 +329,13 @@ class AdminActionsTest(test.TestCase):
# status changed to 'available'
self.assertEquals(volume['status'], 'available')
self.assertEquals(volume['instance_uuid'], None)
self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], None)
self.assertEquals(volume['attach_status'], 'detached')
admin_metadata = volume['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'False')
# cleanup
svc.stop()
@@ -335,10 +349,9 @@ class AdminActionsTest(test.TestCase):
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
host_name = 'fake-host'
self.volume_api.attach(ctx, volume, None, host_name, mountpoint)
self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'ro')
# volume is attached
volume = db.volume_get(ctx, volume['id'])
self.assertEquals(volume['status'], 'in-use')
@@ -346,6 +359,15 @@ class AdminActionsTest(test.TestCase):
self.assertEquals(volume['attached_host'], host_name)
self.assertEquals(volume['mountpoint'], mountpoint)
self.assertEquals(volume['attach_status'], 'attached')
admin_metadata = volume['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'False')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'ro')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
self.assertEquals(conn_info['data']['access_mode'], 'ro')
# build request to force detach
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume['id'])
req.method = 'POST'
@@ -365,6 +387,10 @@ class AdminActionsTest(test.TestCase):
self.assertEquals(volume['attached_host'], None)
self.assertEquals(volume['mountpoint'], None)
self.assertEquals(volume['attach_status'], 'detached')
admin_metadata = volume['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'False')
# cleanup
svc.stop()
@@ -379,16 +405,28 @@ class AdminActionsTest(test.TestCase):
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None, mountpoint)
self.volume_api.attach(ctx, volume, stubs.FAKE_UUID, None,
mountpoint, 'rw')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
self.assertEquals(conn_info['data']['access_mode'], 'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
None,
mountpoint)
mountpoint,
'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
fakes.get_fake_uuid(),
None,
mountpoint,
'ro')
# cleanup
svc.stop()
@@ -403,17 +441,28 @@ class AdminActionsTest(test.TestCase):
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.reserve_volume(ctx, volume)
self.volume_api.initialize_connection(ctx, volume, connector)
mountpoint = '/dev/vbd'
host_name = 'fake_host'
self.volume_api.attach(ctx, volume, None, host_name, mountpoint)
self.volume_api.attach(ctx, volume, None, host_name, mountpoint, 'rw')
conn_info = self.volume_api.initialize_connection(ctx,
volume, connector)
conn_info['data']['access_mode'] = 'rw'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
None,
host_name,
mountpoint)
mountpoint,
'rw')
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
None,
host_name,
mountpoint,
'ro')
# cleanup
svc.stop()
@@ -439,10 +488,8 @@ class AdminActionsTest(test.TestCase):
# current status is available
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': ''})
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
self.volume_api.initialize_connection(ctx, volume, connector)
values = {'status': 'attaching',
'instance_uuid': fakes.get_fake_uuid()}
db.volume_update(ctx, volume['id'], values)
@@ -453,7 +500,34 @@ class AdminActionsTest(test.TestCase):
volume,
stubs.FAKE_UUID,
None,
mountpoint)
mountpoint,
'rw')
# cleanup
svc.stop()
def test_attach_attaching_volume_with_different_mode(self):
"""Test that attaching volume reserved for another mode fails."""
# admin context
ctx = context.RequestContext('admin', 'fake', True)
# current status is available
volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
'provider_location': ''})
# start service to handle rpc messages for attach requests
svc = self.start_service('volume', host='test')
values = {'status': 'attaching',
'instance_uuid': fakes.get_fake_uuid()}
db.volume_update(ctx, volume['id'], values)
db.volume_admin_metadata_update(ctx, volume['id'],
{"attached_mode": 'rw'}, False)
mountpoint = '/dev/vbd'
self.assertRaises(exception.InvalidVolume,
self.volume_api.attach,
ctx,
volume,
values['instance_uuid'],
None,
mountpoint,
'ro')
# cleanup
svc.stop()

View File

@@ -95,7 +95,8 @@ class VolumeActionsTest(test.TestCase):
def test_attach_to_instance(self):
body = {'os-attach': {'instance_uuid': 'fake',
'mountpoint': '/dev/vdc'}}
'mountpoint': '/dev/vdc',
'mode': 'rw'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
@@ -105,6 +106,7 @@ class VolumeActionsTest(test.TestCase):
self.assertEqual(res.status_int, 202)
def test_attach_to_host(self):
# the volume is attached in 'read-write' mode by default
body = {'os-attach': {'host_name': 'fake_host',
'mountpoint': '/dev/vdc'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
@@ -136,6 +138,26 @@ class VolumeActionsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
# Invalid request to attach volume with an invalid mode
body = {'os-attach': {'instance_uuid': 'fake',
'mountpoint': '/dev/vdc',
'mode': 'rr'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
req.method = "POST"
req.headers["content-type"] = "application/json"
req.body = jsonutils.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
body = {'os-attach': {'host_name': 'fake_host',
'mountpoint': '/dev/vdc',
'mode': 'ww'}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
req.method = "POST"
req.headers["content-type"] = "application/json"
req.body = jsonutils.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_begin_detaching(self):
def fake_begin_detaching(*args, **kwargs):
return {}
@@ -181,6 +203,21 @@ class VolumeActionsTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_update_readonly_flag(self):
def fake_update_readonly_flag(*args, **kwargs):
return {}
self.stubs.Set(volume.API, 'update_readonly_flag',
fake_update_readonly_flag)
body = {'os-update_readonly_flag': {'readonly': True}}
req = webob.Request.blank('/v2/fake/volumes/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def stub_volume_get(self, context, volume_id):
volume = stubs.stub_volume(volume_id)

View File

@@ -22,6 +22,7 @@ from lxml import etree
import webob
from cinder import context
from cinder import db
from cinder import test
from cinder.tests.api import fakes
from cinder import volume
@@ -64,6 +65,8 @@ class VolumeHostAttributeTest(test.TestCase):
super(VolumeHostAttributeTest, self).setUp()
self.stubs.Set(volume.API, 'get', fake_volume_get)
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
self.stubs.Set(db, 'volume_get', fake_volume_get)
self.UUID = uuid.uuid4()
def test_get_volume_allowed(self):

View File

@@ -24,6 +24,7 @@ import webob
from cinder.api import common
from cinder.api.openstack.wsgi import MetadataXMLDeserializer
from cinder.api.openstack.wsgi import XMLDeserializer
from cinder import db
from cinder import test
from cinder.tests.api import fakes
from cinder import volume
@@ -71,6 +72,7 @@ class VolumeImageMetadataTest(test.TestCase):
self.stubs.Set(volume.API, 'get_all', fake_volume_get_all)
self.stubs.Set(volume.API, 'get_volume_image_metadata',
fake_get_volume_image_metadata)
self.stubs.Set(db, 'volume_get', fake_volume_get)
self.UUID = uuid.uuid4()
def _make_request(self, url):

View File

@@ -32,7 +32,9 @@ def stub_volume(id, **kwargs):
'size': 1,
'availability_zone': 'fakeaz',
'instance_uuid': 'fakeuuid',
'attached_host': None,
'mountpoint': '/',
'attached_mode': 'rw',
'status': 'fakestatus',
'migration_status': None,
'attach_status': 'attached',
@@ -45,7 +47,8 @@ def stub_volume(id, **kwargs):
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
'volume_type': {'name': 'vol_type_name'}}
'volume_type': {'name': 'vol_type_name'},
'readonly': 'False'}
volume.update(kwargs)
return volume

View File

@@ -65,6 +65,7 @@ class VolumeApiTest(test.TestCase):
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
@@ -91,7 +92,8 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
@@ -162,7 +164,8 @@ class VolumeApiTest(test.TestCase):
'image_id': test_id,
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
@@ -220,17 +223,17 @@ class VolumeApiTest(test.TestCase):
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/',
'device': '/'
}],
'bootable': False,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
}}
'size': 1}}
self.assertEquals(res_dict, expected)
def test_volume_update_metadata(self):
@@ -251,16 +254,18 @@ class VolumeApiTest(test.TestCase):
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/',
'device': '/'
}],
'bootable': False,
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {"qos_max_iops": 2000},
'metadata': {"qos_max_iops": 2000,
"readonly": "False",
"attached_mode": "rw"},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
'size': 1
}}
self.assertEquals(res_dict, expected)
@@ -308,7 +313,8 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
@@ -333,7 +339,8 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
@@ -477,7 +484,8 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
@@ -501,7 +509,7 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),
@@ -530,7 +538,8 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1,
1, 1, 1),

View File

@@ -45,13 +45,16 @@ def stub_volume(id, **kwargs):
'snapshot_id': None,
'source_volid': None,
'volume_type_id': '3e196c20-3c06-11e2-81c1-0800200c9a66',
'volume_metadata': [],
'volume_admin_metadata': [{'key': 'attached_mode', 'value': 'rw'},
{'key': 'readonly', 'value': 'False'}],
'bootable': False,
'volume_type': {'name': 'vol_type_name'}}
volume.update(kwargs)
if kwargs.get('volume_glance_metadata', None):
volume['bootable'] = True
if kwargs.get('attach_status') == 'detached':
del volume['volume_admin_metadata'][0]
return volume
@@ -100,6 +103,10 @@ def stub_volume_get_notfound(self, context, volume_id):
raise exc.NotFound
def stub_volume_get_db(context, volume_id):
return stub_volume(volume_id)
def stub_volume_get_all(context, search_opts=None, marker=None, limit=None,
sort_key='created_at', sort_dir='desc'):
return [stub_volume(100, project_id='fake'),

View File

@@ -244,7 +244,7 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
@@ -286,7 +286,9 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {"qos_max_iops": 2000},
'metadata': {"qos_max_iops": 2000,
"readonly": "False",
"attached_mode": "rw"},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
@@ -380,7 +382,7 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
@@ -681,7 +683,7 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'attached_mode': 'rw', 'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,
@@ -718,7 +720,7 @@ class VolumeApiTest(test.TestCase):
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'metadata': {'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'size': 1,

View File

@@ -35,9 +35,17 @@ class FakeISCSIDriver(lvm.LVMISCSIDriver):
pass
def initialize_connection(self, volume, connector):
volume_metadata = {}
for metadata in volume['volume_admin_metadata']:
volume_metadata[metadata['key']] = metadata['value']
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
return {
'driver_volume_type': 'iscsi',
'data': {}
'data': {'access_mode': access_mode}
}
def terminate_connection(self, volume, connector, **kwargs):

View File

@@ -7,10 +7,13 @@
"volume:get": [],
"volume:get_all": [],
"volume:get_volume_metadata": [],
"volume:delete": [],
"volume:update": [],
"volume:delete_volume_metadata": [],
"volume:update_volume_metadata": [],
"volume:get_volume_admin_metadata": [["rule:admin_api"]],
"volume:delete_volume_admin_metadata": [["rule:admin_api"]],
"volume:update_volume_admin_metadata": [["rule:admin_api"]],
"volume:delete": [],
"volume:update": [],
"volume:attach": [],
"volume:detach": [],
"volume:reserve_volume": [],
@@ -29,6 +32,7 @@
"volume:extend": [],
"volume:migrate_volume": [["rule:admin_api"]],
"volume:migrate_volume_completion": [["rule:admin_api"]],
"volume:update_readonly_flag": [],
"volume_extension:volume_admin_actions:reset_status": [["rule:admin_api"]],
"volume_extension:snapshot_admin_actions:reset_status": [["rule:admin_api"]],

View File

@@ -960,3 +960,45 @@ class TestMigrations(test.TestCase):
metadata,
autoload=True)
self.assertTrue('migration_status' not in volumes.c)
def test_migration_020(self):
"""Test adding volume_admin_metadata table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.INIT_VERSION)
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
volume_admin_metadata = sqlalchemy.Table('volume_admin_metadata',
metadata,
autoload=True)
self.assertTrue(isinstance(volume_admin_metadata.c.created_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(volume_admin_metadata.c.updated_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(volume_admin_metadata.c.deleted_at.type,
sqlalchemy.types.DATETIME))
self.assertTrue(isinstance(volume_admin_metadata.c.deleted.type,
sqlalchemy.types.BOOLEAN))
self.assertTrue(isinstance(volume_admin_metadata.c.deleted.type,
sqlalchemy.types.BOOLEAN))
self.assertTrue(isinstance(volume_admin_metadata.c.id.type,
sqlalchemy.types.INTEGER))
self.assertTrue(isinstance(volume_admin_metadata.c.volume_id.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(volume_admin_metadata.c.key.type,
sqlalchemy.types.VARCHAR))
self.assertTrue(isinstance(volume_admin_metadata.c.value.type,
sqlalchemy.types.VARCHAR))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 19)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))

View File

@@ -119,8 +119,9 @@ class BaseVolumeTestCase(test.TestCase):
@staticmethod
def _create_volume(size=0, snapshot_id=None, image_id=None,
source_volid=None, metadata=None, status="creating",
migration_status=None, availability_zone=None):
source_volid=None, metadata=None, admin_metadata=None,
status="creating", migration_status=None,
availability_zone=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
@@ -136,6 +137,8 @@ class BaseVolumeTestCase(test.TestCase):
vol['host'] = CONF.host
if metadata is not None:
vol['metadata'] = metadata
if admin_metadata is not None:
vol['admin_metadata'] = admin_metadata
return db.volume_create(context.get_admin_context(), vol)
@@ -551,22 +554,32 @@ class VolumeTestCase(BaseVolumeTestCase):
except TypeError:
pass
def test_run_attach_detach_volume(self):
def test_run_attach_detach_volume_for_instance(self):
"""Make sure volume can be attached and detached from instance."""
mountpoint = "/dev/sdf"
# attach volume to the instance then to detach
instance_uuid = '12345678-1234-5678-1234-567812345678'
volume = self._create_volume()
volume = self._create_volume(admin_metadata={'readonly': 'True'})
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint)
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['attached_host'], None)
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEquals(conn_info['data']['access_mode'], 'ro')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
@@ -582,12 +595,14 @@ class VolumeTestCase(BaseVolumeTestCase):
self.context,
volume_id)
# attach volume to the host then to detach
volume = self._create_volume()
def test_run_attach_detach_volume_for_host(self):
"""Make sure volume can be attached and detached from host."""
mountpoint = "/dev/sdf"
volume = self._create_volume(admin_metadata={'readonly': 'False'})
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint)
'fake_host', mountpoint, 'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
@@ -595,6 +610,16 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertEqual(vol['instance_uuid'], None)
# sanitized, conforms to RFC-952 and RFC-1123 specs.
self.assertEqual(vol['attached_host'], 'fake-host')
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'False')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'rw')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEquals(conn_info['data']['access_mode'], 'rw')
self.assertRaises(exception.VolumeAttached,
self.volume.delete_volume,
@@ -610,6 +635,167 @@ class VolumeTestCase(BaseVolumeTestCase):
self.context,
volume_id)
def test_run_attach_detach_volume_with_attach_mode(self):
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = self._create_volume(admin_metadata={'readonly': 'True'})
volume_id = volume['id']
db.volume_update(self.context, volume_id, {'status': 'available',
'mountpoint': None,
'instance_uuid': None,
'attached_host': None,
'attached_mode': None})
self.volume.attach_volume(self.context, volume_id, instance_uuid,
None, mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertEqual(vol['attached_host'], None)
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEquals(conn_info['data']['access_mode'], 'ro')
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['instance_uuid'], None)
self.assertEqual(vol['attached_host'], None)
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.volume.attach_volume(self.context, volume_id, None,
'fake_host', mountpoint, 'ro')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], None)
self.assertEqual(vol['attached_host'], 'fake-host')
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'ro')
connector = {'initiator': 'iqn.2012-07.org.fake:01'}
conn_info = self.volume.initialize_connection(self.context,
volume_id, connector)
self.assertEquals(conn_info['data']['access_mode'], 'ro')
self.volume.detach_volume(self.context, volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
self.assertEqual(vol['mountpoint'], None)
self.assertEqual(vol['instance_uuid'], None)
self.assertEqual(vol['attached_host'], None)
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
def test_run_manager_attach_detach_volume_with_wrong_attach_mode(self):
# Attaching a read-only volume in 'read-write' mode is not allowed
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = self._create_volume(admin_metadata={'readonly': 'True'})
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "error_attaching")
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'rw')
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
self.volume.attach_volume,
self.context,
volume_id,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "error_attaching")
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 2)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
self.assertEquals(admin_metadata[1]['key'], 'attached_mode')
self.assertEquals(admin_metadata[1]['value'], 'rw')
def test_run_api_attach_detach_volume_with_wrong_attach_mode(self):
# Attaching a read-only volume in 'read-write' mode is not allowed
instance_uuid = '12345678-1234-5678-1234-567812345678'
mountpoint = "/dev/sdf"
volume = self._create_volume(admin_metadata={'readonly': 'True'})
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
volume_api = cinder.volume.api.API()
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
instance_uuid,
None,
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
db.volume_update(self.context, volume_id, {'status': 'available'})
self.assertRaises(exception.InvalidVolumeAttachMode,
volume_api.attach,
self.context,
volume,
None,
'fake_host',
mountpoint,
'rw')
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['attach_status'], "detached")
admin_metadata = vol['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'True')
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
@@ -1419,6 +1605,37 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertEquals(volume['host'], 'newhost')
self.assertEquals(volume['migration_status'], None)
def test_update_volume_readonly_flag(self):
"""Test volume readonly flag can be updated at API level."""
# create a volume and assign to host
volume = self._create_volume(admin_metadata={'readonly': 'True'})
self.volume.create_volume(self.context, volume['id'])
volume['status'] = 'in-use'
volume_api = cinder.volume.api.API()
# Update fails when status != available
self.assertRaises(exception.InvalidVolume,
volume_api.update_readonly_flag,
self.context,
volume,
False)
volume['status'] = 'available'
# works when volume in 'available' status
volume_api.update_readonly_flag(self.context, volume, False)
volume = db.volume_get(context.get_admin_context(), volume['id'])
self.assertEquals(volume['status'], 'available')
admin_metadata = volume['volume_admin_metadata']
self.assertEquals(len(admin_metadata), 1)
self.assertEquals(admin_metadata[0]['key'], 'readonly')
self.assertEquals(admin_metadata[0]['value'], 'False')
# clean up
self.volume.delete_volume(self.context, volume['id'])
class CopyVolumeToImageTestCase(BaseVolumeTestCase):
def fake_local_path(self, volume):
@@ -1869,7 +2086,8 @@ class ISCSITestCase(DriverTestCase):
def test_get_iscsi_properties(self):
volume = {"provider_location": '',
"id": "0",
"provider_auth": "a b c"}
"provider_auth": "a b c",
"attached_mode": "rw"}
iscsi_driver = driver.ISCSIDriver()
iscsi_driver._do_iscsi_discovery = lambda v: "0.0.0.0:0000,0 iqn:iqn 0"
result = iscsi_driver._get_iscsi_properties(volume)

View File

@@ -155,7 +155,8 @@ class VolumeRpcAPITestCase(test.TestCase):
instance_uuid='fake_uuid',
host_name=None,
mountpoint='fake_mountpoint',
version='1.7')
mode='ro',
version='1.11')
def test_attach_volume_to_host(self):
self._test_volume_api('attach_volume',
@@ -164,7 +165,8 @@ class VolumeRpcAPITestCase(test.TestCase):
instance_uuid=None,
host_name='fake_host',
mountpoint='fake_mountpoint',
version='1.7')
mode='rw',
version='1.11')
def test_detach_volume(self):
self._test_volume_api('detach_volume',

View File

@@ -410,12 +410,26 @@ class API(base.Base):
self.update(context, volume, {"status": "in-use"})
@wrap_check_policy
def attach(self, context, volume, instance_uuid, host_name, mountpoint):
def attach(self, context, volume, instance_uuid, host_name,
mountpoint, mode):
volume_metadata = self.get_volume_admin_metadata(context.elevated(),
volume)
if 'readonly' not in volume_metadata:
# NOTE(zhiyan): set a default value for read-only flag to metadata.
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': 'False'})
volume_metadata['readonly'] = 'False'
if volume_metadata['readonly'] == 'True' and mode != 'ro':
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume['id'])
return self.volume_rpcapi.attach_volume(context,
volume,
instance_uuid,
host_name,
mountpoint)
mountpoint,
mode)
@wrap_check_policy
def detach(self, context, volume):
@@ -592,7 +606,8 @@ class API(base.Base):
self._check_metadata_properties(context, _metadata)
self.db.volume_metadata_update(context, volume['id'], _metadata, True)
self.db.volume_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
@@ -607,6 +622,42 @@ class API(base.Base):
return i['value']
return None
@wrap_check_policy
def get_volume_admin_metadata(self, context, volume):
"""Get all administration metadata associated with a volume."""
rv = self.db.volume_admin_metadata_get(context, volume['id'])
return dict(rv.iteritems())
@wrap_check_policy
def delete_volume_admin_metadata(self, context, volume, key):
"""Delete the given administration metadata item from a volume."""
self.db.volume_admin_metadata_delete(context, volume['id'], key)
@wrap_check_policy
def update_volume_admin_metadata(self, context, volume, metadata,
delete=False):
"""Updates or creates volume administration metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig_meta = self.get_volume_admin_metadata(context, volume)
if delete:
_metadata = metadata
else:
_metadata = orig_meta.copy()
_metadata.update(metadata)
self._check_metadata_properties(context, _metadata)
self.db.volume_admin_metadata_update(context, volume['id'],
_metadata, delete)
# TODO(jdg): Implement an RPC call for drivers that may use this info
return _metadata
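To make the delete semantics of update_volume_admin_metadata() concrete, here is a small, self-contained sketch using plain dicts in place of the database layer (the key/value pairs are made up for illustration):
existing = {'readonly': 'True', 'attached_mode': 'rw'}
update = {'readonly': 'False'}
# delete=False merges: unspecified keys survive and 'readonly' is overwritten.
assert dict(existing, **update) == {'readonly': 'False', 'attached_mode': 'rw'}
# delete=True replaces: keys not named in 'update' ('attached_mode') are dropped.
assert dict(update) == {'readonly': 'False'}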
def get_snapshot_metadata(self, context, snapshot):
"""Get all metadata associated with a snapshot."""
rv = self.db.snapshot_metadata_get(context, snapshot['id'])
@@ -786,6 +837,14 @@ class API(base.Base):
return self.volume_rpcapi.migrate_volume_completion(context, volume,
new_volume, error)
@wrap_check_policy
def update_readonly_flag(self, context, volume, flag):
if volume['status'] != 'available':
msg = _('Volume status must be available to update readonly flag.')
raise exception.InvalidVolume(reason=msg)
self.update_volume_admin_metadata(context.elevated(), volume,
{'readonly': str(flag)})
class HostAPI(base.Base):
def __init__(self):
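A minimal usage sketch of the changed API layer, assuming a request context, a volume dict and an instance UUID are already available; the helper name and mountpoint are illustrative, not part of this change:
from cinder import exception
from cinder.volume import api as volume_api


def attach_read_only(ctxt, volume, instance_uuid):
    api = volume_api.API()
    # Only permitted while the volume status is 'available'.
    api.update_readonly_flag(ctxt, volume, True)
    try:
        # 'rw' is rejected once the admin metadata marks the volume read-only.
        api.attach(ctxt, volume, instance_uuid, None, '/dev/vdb', 'rw')
    except exception.InvalidVolumeAttachMode:
        # Retrying with 'ro' matches the recorded flag.
        api.attach(ctxt, volume, instance_uuid, None, '/dev/vdb', 'ro')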

View File

@@ -484,6 +484,9 @@ class ISCSIDriver(VolumeDriver):
the authentication details. Right now, either auth_method is not
present meaning no authentication, or auth_method == `CHAP`
meaning use CHAP with the specified credentials.
:access_mode: the volume access mode the client is allowed to use
('rw' and 'ro' are currently supported)
"""
properties = {}
@@ -580,6 +583,7 @@ class ISCSIDriver(VolumeDriver):
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.0.1:3260',
'volume_id': 1,
'access_mode': 'rw'
}
}
@@ -661,7 +665,7 @@ class FakeISCSIDriver(ISCSIDriver):
def initialize_connection(self, volume, connector):
return {
'driver_volume_type': 'iscsi',
'data': {}
'data': {'access_mode': 'rw'}
}
def terminate_connection(self, volume, connector, **kwargs):
@@ -968,6 +972,7 @@ class FibreChannelDriver(VolumeDriver):
'target_discovered': True,
'target_lun': 1,
'target_wwn': '1234567890123',
'access_mode': 'rw'
}
}
@@ -979,6 +984,7 @@ class FibreChannelDriver(VolumeDriver):
'target_discovered': True,
'target_lun': 1,
'target_wwn': ['1234567890123', '0987654321321'],
'access_mode': 'rw'
}
}
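A hedged sketch of how a consumer of this connection info (a Nova-style caller, outside the scope of this change) might honor the new field; the helper name is hypothetical and the dict mirrors the examples above:
def access_mode_from_conn_info(conn_info):
    # Older backends may not report an access mode yet; default to 'rw'.
    mode = conn_info.get('data', {}).get('access_mode', 'rw')
    if mode not in ('rw', 'ro'):
        raise ValueError('unsupported access mode: %s' % mode)
    return mode


conn_info = {'driver_volume_type': 'iscsi',
             'data': {'target_lun': 1, 'access_mode': 'ro'}}
assert access_mode_from_conn_info(conn_info) == 'ro'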

View File

@@ -130,7 +130,7 @@ MAPPING = {
class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = '1.10'
RPC_API_VERSION = '1.11'
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
@@ -392,12 +392,14 @@ class VolumeManager(manager.SchedulerDependentManager):
return True
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint):
mountpoint, mode):
"""Updates db to show volume is attached"""
@utils.synchronized(volume_id, external=True)
def do_attach():
# check the volume status before attaching
volume = self.db.volume_get(context, volume_id)
volume_metadata = self.db.volume_admin_metadata_get(
context.elevated(), volume_id)
if volume['status'] == 'attaching':
if (volume['instance_uuid'] and volume['instance_uuid'] !=
instance_uuid):
@@ -407,6 +409,10 @@ class VolumeManager(manager.SchedulerDependentManager):
host_name):
msg = _("being attached by another host")
raise exception.InvalidVolume(reason=msg)
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
msg = _("being attached by different mode")
raise exception.InvalidVolume(reason=msg)
elif volume['status'] != "available":
msg = _("status must be available")
raise exception.InvalidVolume(reason=msg)
@@ -414,17 +420,18 @@ class VolumeManager(manager.SchedulerDependentManager):
# TODO(jdg): attach_time column is currently varchar
# we should update this to a date-time object
# also consider adding detach_time?
now = timeutils.strtime()
new_status = 'attaching'
self.db.volume_update(context, volume_id,
{"instance_uuid": instance_uuid,
"attached_host": host_name,
"status": new_status,
"attach_time": now})
"status": "attaching",
"attach_time": timeutils.strtime()})
self.db.volume_admin_metadata_update(context.elevated(),
volume_id,
{"attached_mode": mode},
False)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
self.db.volume_update(context,
volume_id,
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidUUID(uuid=instance_uuid)
@@ -432,6 +439,12 @@ class VolumeManager(manager.SchedulerDependentManager):
host_name) if host_name else None
volume = self.db.volume_get(context, volume_id)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume_id)
try:
self.driver.attach_volume(context,
volume,
@@ -440,8 +453,7 @@ class VolumeManager(manager.SchedulerDependentManager):
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_id,
self.db.volume_update(context, volume_id,
{'status': 'error_attaching'})
self.db.volume_attached(context.elevated(),
@@ -466,6 +478,8 @@ class VolumeManager(manager.SchedulerDependentManager):
{'status': 'error_detaching'})
self.db.volume_detached(context.elevated(), volume_id)
self.db.volume_admin_metadata_delete(context.elevated(), volume_id,
'attached_mode')
# Check for https://bugs.launchpad.net/cinder/+bug/1065702
volume = self.db.volume_get(context, volume_id)
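The attach-time checks added above can be summarized in a standalone sketch; plain dicts stand in for the database rows and ValueError for the Cinder exceptions:
def check_attach_mode(volume_status, admin_metadata, requested_mode):
    # A volume already in 'attaching' must keep the mode recorded first.
    if volume_status == 'attaching':
        recorded = admin_metadata.get('attached_mode')
        if recorded and recorded != requested_mode:
            raise ValueError('volume is being attached in a different mode')
    # A read-only volume may only be attached with mode 'ro'.
    if admin_metadata.get('readonly') == 'True' and requested_mode != 'ro':
        raise ValueError('read-only volume requires mode=ro')
    return requested_mode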
@@ -540,12 +554,12 @@ class VolumeManager(manager.SchedulerDependentManager):
json in various places, so it should not contain any non-json
data types.
"""
volume_ref = self.db.volume_get(context, volume_id)
volume = self.db.volume_get(context, volume_id)
self.driver.validate_connector(connector)
conn_info = self.driver.initialize_connection(volume_ref, connector)
conn_info = self.driver.initialize_connection(volume, connector)
# Add qos_specs to connection info
typeid = volume_ref['volume_type_id']
typeid = volume['volume_type_id']
specs = {}
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
@@ -556,6 +570,17 @@ class VolumeManager(manager.SchedulerDependentManager):
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = self.db.volume_admin_metadata_get(context.elevated(),
volume_id)
if conn_info['data'].get('access_mode') is None:
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
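The access-mode fallback performed in initialize_connection() reduces to the following standalone sketch; argument names are illustrative, and the inputs come from the driver and the volume admin metadata:
def resolve_access_mode(driver_reported, attached_mode, readonly_flag):
    # 1. A mode the driver already put into the connection info wins.
    if driver_reported is not None:
        return driver_reported
    # 2. Otherwise use the mode recorded when 'os-attach' was called.
    if attached_mode is not None:
        return attached_mode
    # 3. If the client never attached, derive it from the read-only flag.
    return 'ro' if readonly_flag == 'True' else 'rw'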

View File

@@ -45,6 +45,8 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Add mode parameter to attach_volume()
to support read-only volume attaching.
'''
BASE_RPC_API_VERSION = '1.0'
@@ -91,16 +93,17 @@ class VolumeAPI(cinder.openstack.common.rpc.proxy.RpcProxy):
topic=rpc.queue_get_for(ctxt, self.topic, host))
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint):
mountpoint, mode):
return self.call(ctxt, self.make_msg('attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
host_name=host_name,
mountpoint=mountpoint),
mountpoint=mountpoint,
mode=mode),
topic=rpc.queue_get_for(ctxt,
self.topic,
volume['host']),
version='1.7')
version='1.11')
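Caller-side sketch of the updated proxy method, assuming a context and volume dict obtained elsewhere; only the new 'mode' keyword and the 1.11 pin are being illustrated:
from cinder.volume import rpcapi as volume_rpcapi


def request_read_only_attach(ctxt, volume, instance_uuid):
    rpcapi = volume_rpcapi.VolumeAPI()
    # Blocks until the manager records the attachment (it is an rpc 'call').
    return rpcapi.attach_volume(ctxt, volume,
                                instance_uuid=instance_uuid,
                                host_name=None,
                                mountpoint='/dev/vdb',
                                mode='ro')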
def detach_volume(self, ctxt, volume):
return self.call(ctxt, self.make_msg('detach_volume',

View File

@@ -8,9 +8,13 @@
"volume:create": [],
"volume:get_all": [],
"volume:get_volume_metadata": [],
"volume:get_volume_admin_metadata": [["rule:admin_api"]],
"volume:delete_volume_admin_metadata": [["rule:admin_api"]],
"volume:update_volume_admin_metadata": [["rule:admin_api"]],
"volume:get_snapshot": [],
"volume:get_all_snapshots": [],
"volume:extend": [],
"volume:update_readonly_flag": [],
"volume_extension:types_manage": [["rule:admin_api"]],
"volume_extension:types_extra_specs": [["rule:admin_api"]],
@@ -47,5 +51,4 @@
"backup:get": [],
"backup:get_all": [],
"backup:restore": []
}
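For reference, a hedged sketch of the request bodies behind the new policy entries; the UUID and mountpoint are placeholders and the surrounding HTTP plumbing is omitted:
# Sent to the volume's action URL (POST .../volumes/{volume_id}/action).
# 'readonly' must be a JSON boolean and the volume must be 'available'.
set_readonly_body = {
    'os-update_readonly_flag': {'readonly': True},
}

# Attach afterwards; 'mode' accepts 'rw' or 'ro' and defaults to 'rw'
# when omitted, but only 'ro' is accepted once the flag above is set.
attach_body = {
    'os-attach': {
        'instance_uuid': '11111111-2222-3333-4444-555555555555',
        'mountpoint': '/dev/vdb',
        'mode': 'ro',
    },
}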