# NOTE(review): The following lines are artifacts of the web UI this file was
# scraped from (repository description, topic hints, file size). They are not
# part of the original source; kept here as comments so the module parses.
#   OpenStack Block Storage (Cinder) -- 743 lines, 33 KiB
# Copyright 2015 SimpliVity Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import versionutils
from oslo_versionedobjects import fields

from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import cleanable
from cinder.objects import fields as c_fields
from cinder.volume import volume_types

# CONF is referenced by the Volume.name property (volume_name_template) but
# was never bound in this module; define it alongside LOG.
CONF = cfg.CONF

LOG = logging.getLogger(__name__)
class MetadataObject(dict):
    """Dict subclass mimicking SQLAlchemy (.*)Metadata rows.

    Older Volume representations expose metadata as lists of SQLAlchemy
    model objects with ``.key``/``.value`` attributes; some drivers still
    rely on that shape. This wrapper lets a plain dict behave like such a
    row during the transition period in which driver methods may receive
    either a versioned object or a SQLAlchemy object/dict.
    """

    def __init__(self, key=None, value=None):
        super(MetadataObject, self).__init__()
        # Stored through __setattr__, so they land in the dict itself.
        self.key = key
        self.value = value

    def __getattr__(self, name):
        # Attribute access falls through to dict lookup.
        if name not in self:
            raise AttributeError("No such attribute: " + name)
        return self[name]

    def __setattr__(self, name, value):
        # Every attribute assignment is stored as a dict item.
        self[name] = value
@base.CinderObjectRegistry.register
class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject,
             base.ClusteredObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    # Version 1.4: Added cluster fields
    # Version 1.5: Added group
    # Version 1.6: This object is now cleanable (adds rows to workers table)
    # Version 1.7: Added service_uuid
    # Version 1.8: Added shared_targets
    # Version 1.9: Added use_quota
    VERSION = '1.9'

    # Relationship/joined fields that are lazy-loadable via obj_load_attr.
    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots', 'cluster', 'group')

    # NOTE: When adding a field obj_make_compatible needs to be updated
    fields = {
        # id is the user facing UUID that should be passed to API calls
        'id': fields.UUIDField(),
        # _name_id is the real volume's UUID that should be used by the driver
        # when it is set.  This is used when migrating a volume.  Property
        # name_id is provided for convenience.
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        # TODO: (Y release) Change nullable to False
        'use_quota': fields.BooleanField(default=True, nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'migration_status': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'provider_id': fields.StringField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'group_id': fields.UUIDField(nullable=True),
        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),
        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),
        'previous_status': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
        'group': fields.ObjectField('Group', nullable=True),
        'service_uuid': fields.StringField(nullable=True),
        'shared_targets': fields.BooleanField(default=True, nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id', 'volume_metadata',
                        'volume_admin_metadata', 'volume_glance_metadata']
@classmethod
def _get_expected_attrs(cls, context, *args, **kwargs):
    """Return relationship attrs to eager-load when querying a volume."""
    expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs',
                      'volume_attachment']
    # Admin metadata is only readable (and thus loadable) as admin.
    if context.is_admin:
        expected_attrs.append('admin_metadata')
    return expected_attrs
def name_id(self):
"""Actual volume's UUID for driver usage.
There may be two different UUIDs for the same volume, the user facing
one, and the one the driver should be using.
When a volume is created these two are the same, but when doing a
generic migration (create new volume, then copying data) they will be
different if we were unable to rename the new volume in the final
migration steps.
So the volume will have been created using the new volume's UUID and
the driver will have to look for it using that UUID, but the user on
the other hand will keep referencing the volume with the original UUID.
This property facilitates using the right UUID in the driver's code.
return if not self._name_id else self._name_id
def name_id(self, value):
self._name_id = value
@property
def name(self):
    """Backend-visible volume name built from the configured template."""
    return CONF.volume_name_template % self.name_id
# TODO(dulek): Three properties below are for compatibility with dict
# representation of volume. The format there is different (list of
# SQLAlchemy models) so we need a conversion. Anyway - these should be
# removed when we stop this class from deriving from DictObjectCompat.
@property
def volume_metadata(self):
    """User metadata as a list of MetadataObject items (legacy format)."""
    md = [MetadataObject(k, v) for k, v in self.metadata.items()]
    return md

@volume_metadata.setter
def volume_metadata(self, value):
    # Accepts a list of dict-like items with 'key'/'value' entries.
    md = {d['key']: d['value'] for d in value}
    self.metadata = md
@property
def volume_admin_metadata(self):
    """Admin metadata as a list of MetadataObject items (legacy format)."""
    md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
    return md

@volume_admin_metadata.setter
def volume_admin_metadata(self, value):
    # Accepts a list of dict-like items with 'key'/'value' entries.
    md = {d['key']: d['value'] for d in value}
    self.admin_metadata = md
def admin_metadata_update(self, metadata, delete, add=True, update=True):
new_metadata = db.volume_admin_metadata_update(self._context,,
metadata, delete, add,
self.admin_metadata = new_metadata
@property
def volume_glance_metadata(self):
    """Glance metadata as a list of MetadataObject items (legacy format)."""
    md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
    return md

@volume_glance_metadata.setter
def volume_glance_metadata(self, value):
    # Accepts a list of dict-like items with 'key'/'value' entries.
    md = {d['key']: d['value'] for d in value}
    self.glance_metadata = md
def __init__(self, *args, **kwargs):
    super(Volume, self).__init__(*args, **kwargs)
    # Snapshot metadata dicts so obj_what_changed() can diff them later.
    self._reset_metadata_tracking()

def obj_reset_changes(self, fields=None):
    super(Volume, self).obj_reset_changes(fields)
    # Metadata dicts are mutated in place, so their change tracking is
    # handled separately from regular field tracking.
    self._reset_metadata_tracking(fields=fields)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
    # TODO: (Y release) remove next line
    cls._ensure_use_quota_is_set(primitive['versioned_object.data'])
    obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                    primitive)
    # Freshly deserialized object has no pending metadata changes.
    obj._reset_metadata_tracking()
    return obj
def _reset_metadata_tracking(self, fields=None):
    """Snapshot the metadata dicts used by obj_what_changed() diffs.

    :param fields: iterable of field names to reset, or None for all
                   three metadata dicts.
    """
    for attr in ('metadata', 'admin_metadata', 'glance_metadata'):
        if fields is not None and attr not in fields:
            continue
        # Copy the current dict (empty when the field isn't loaded yet).
        snapshot = dict(getattr(self, attr)) if attr in self else {}
        setattr(self, '_orig_' + attr, snapshot)
def obj_what_changed(self):
    """Report changed fields, including in-place metadata dict edits."""
    changes = super(Volume, self).obj_what_changed()
    # Metadata dicts are mutated in place, so compare against the
    # snapshots taken by _reset_metadata_tracking().
    if 'metadata' in self and self.metadata != self._orig_metadata:
        changes.add('metadata')
    if ('admin_metadata' in self and
            self.admin_metadata != self._orig_admin_metadata):
        changes.add('admin_metadata')
    if ('glance_metadata' in self and
            self.glance_metadata != self._orig_glance_metadata):
        changes.add('glance_metadata')
    return changes
def obj_make_compatible(self, primitive, target_version):
    """Make a Volume representation compatible with a target version."""
    super(Volume, self).obj_make_compatible(primitive, target_version)
    version_tuple = versionutils.convert_version_to_tuple(target_version)
    # TODO: (Y release) remove next 2 lines & method if nothing else below
    if version_tuple < (1, 9):
        # use_quota was introduced in 1.9; older consumers can't carry it.
        primitive.pop('use_quota', None)
def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for name, field in volume.fields.items():
if name in cls.OPTIONAL_FIELDS:
value = db_volume.get(name)
if isinstance(field, fields.IntegerField):
value = value or 0
volume[name] = value
# Get data from db_volume object that was queried by joined query
# from DB
if 'metadata' in expected_attrs:
metadata = db_volume.get('volume_metadata', [])
volume.metadata = {item['key']: item['value'] for item in metadata}
if 'admin_metadata' in expected_attrs:
metadata = db_volume.get('volume_admin_metadata', [])
volume.admin_metadata = {item['key']: item['value']
for item in metadata}
if 'glance_metadata' in expected_attrs:
metadata = db_volume.get('volume_glance_metadata', [])
volume.glance_metadata = {item['key']: item['value']
for item in metadata}
if 'volume_type' in expected_attrs:
db_volume_type = db_volume.get('volume_type')
if db_volume_type:
vt_expected_attrs = []
if 'volume_type.extra_specs' in expected_attrs:
volume.volume_type = objects.VolumeType._from_db_object(
context, objects.VolumeType(), db_volume_type,
if 'volume_attachment' in expected_attrs:
attachments = base.obj_make_list(
context, objects.VolumeAttachmentList(context),
volume.volume_attachment = attachments
if volume.consistencygroup_id and 'consistencygroup' in expected_attrs:
consistencygroup = objects.ConsistencyGroup(context)
volume.consistencygroup = consistencygroup
if 'snapshots' in expected_attrs:
snapshots = base.obj_make_list(
context, objects.SnapshotList(context),
volume.snapshots = snapshots
if 'cluster' in expected_attrs:
db_cluster = db_volume.get('cluster')
# If this volume doesn't belong to a cluster the cluster field in
# the ORM instance will have value of None.
if db_cluster:
volume.cluster = objects.Cluster(context)
objects.Cluster._from_db_object(context, volume.cluster,
volume.cluster = None
if volume.group_id and 'group' in expected_attrs:
group = objects.Group(context)
db_volume['group']) = group
volume._context = context
return volume
# TODO: (Z release): Remove method and leave the default of False from DB
@staticmethod
def _ensure_use_quota_is_set(updates, warning=False):
    """Default the use_quota field when missing (online data migration).

    Migration-target volumes and volumes flagged as temporary in the
    admin metadata don't consume quota; everything else does.
    """
    if updates.get('use_quota') is None:
        use_quota = not (
            (updates.get('migration_status') or ''
             ).startswith('target:') or
            (updates.get('admin_metadata') or {}
             ).get('temporary') == 'True')
        if warning and not use_quota:
            LOG.warning('Ooooops, we forgot to set the use_quota field to '
                        'False!! Fix code here')
        updates['use_quota'] = use_quota
def populate_consistencygroup(self):
"""Populate CG fields based on group fields.
Method assumes that consistencygroup_id and consistencygroup fields
have not already been set.
This is a hack to support backward compatibility of consistencygroup,
where we set the fields but don't want to write them to the DB, so we
mark them as not changed, so they won't be stored on the next save().
self.consistencygroup_id = self.group_id
if self.group_id and self.obj_attr_is_set('group'):
cg = objects.ConsistencyGroup()
self.consistencygroup = cg
self.obj_reset_changes(['consistencygroup', 'consistencygroup_id'])
def create(self):
    """Persist this volume in the DB from its changed fields.

    :raises ObjectActionError: if the object was already created, or if
        a relationship field (consistencygroup, snapshots, cluster,
        group) was assigned -- those cannot be written through here.
    """
    if self.obj_attr_is_set('id'):
        raise exception.ObjectActionError(action='create',
                                          reason=_('already created'))
    updates = self.cinder_obj_get_changes()
    if 'consistencygroup' in updates:
        raise exception.ObjectActionError(
            action='create', reason=_('consistencygroup assigned'))
    if 'snapshots' in updates:
        raise exception.ObjectActionError(
            action='create', reason=_('snapshots assigned'))
    if 'cluster' in updates:
        raise exception.ObjectActionError(
            action='create', reason=_('cluster assigned'))
    if 'group' in updates:
        raise exception.ObjectActionError(
            action='create', reason=_('group assigned'))
    # Fall back to the default volume type when none was provided.
    if ('volume_type_id' not in updates or
            updates['volume_type_id'] is None):
        updates['volume_type_id'] = (
            volume_types.get_default_volume_type()['id'])

    # TODO: (Y release) Remove this call since we should have already made
    # all methods in Cinder make the call with the right values.
    self._ensure_use_quota_is_set(updates, warning=True)

    db_volume = db.volume_create(self._context, updates)
    expected_attrs = self._get_expected_attrs(self._context)
    self._from_db_object(self._context, self, db_volume, expected_attrs)
def save(self):
# TODO: (Y release) Remove this online migration code
# Pass self directly since it's a CinderObjectDictCompat
updates = self.cinder_obj_get_changes()
if updates:
# NOTE(xyang): Allow this to pass if 'consistencygroup' is
# set to None. This is to support backward compatibility.
# Also remove 'consistencygroup' from updates because
# consistencygroup is the name of a relationship in the ORM
# Volume model, so SQLA tries to do some kind of update of
# the foreign key based on the provided updates if
# 'consistencygroup' is in updates.
if updates.pop('consistencygroup', None):
raise exception.ObjectActionError(
action='save', reason=_('consistencygroup changed'))
if 'group' in updates:
raise exception.ObjectActionError(
action='save', reason=_('group changed'))
if 'glance_metadata' in updates:
raise exception.ObjectActionError(
action='save', reason=_('glance_metadata changed'))
if 'snapshots' in updates:
raise exception.ObjectActionError(
action='save', reason=_('snapshots changed'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cluster changed'))
if 'metadata' in updates:
# Metadata items that are not specified in the
# self.metadata will be deleted
metadata = updates.pop('metadata', None)
self.metadata = db.volume_metadata_update(self._context,, metadata,
if self._context.is_admin and 'admin_metadata' in updates:
metadata = updates.pop('admin_metadata', None)
self.admin_metadata = db.volume_admin_metadata_update(
self._context,, metadata, True)
# When we are creating a volume and we change from 'creating'
# status to 'downloading' status we have to change the worker entry
# in the DB to reflect this change, otherwise the cleanup will
# not be performed as it will be mistaken for a volume that has
# been somehow changed (reset status, forced operation...)
if updates.get('status') == 'downloading':
# updates are changed after popping out metadata.
if updates:
db.volume_update(self._context,, updates)
def destroy(self):
with self.obj_as_admin():
updated_values = db.volume_destroy(self._context,
def obj_load_attr(self, attrname):
if attrname not in self.OPTIONAL_FIELDS:
raise exception.ObjectActionError(
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
if attrname == 'metadata':
self.metadata = db.volume_metadata_get(self._context,
elif attrname == 'admin_metadata':
self.admin_metadata = {}
if self._context.is_admin:
self.admin_metadata = db.volume_admin_metadata_get(
elif attrname == 'glance_metadata':
# NOTE(dulek): We're using alias here to have conversion from
# list to dict done there.
self.volume_glance_metadata = db.volume_glance_metadata_get(
except exception.GlanceMetadataNotFound:
# NOTE(dulek): DB API raises when volume has no
# glance_metadata. Silencing this because at this level no
# metadata is a completely valid result.
self.glance_metadata = {}
elif attrname == 'volume_type':
# If the volume doesn't have volume_type, VolumeType.get_by_id
# would trigger a db call which raise VolumeTypeNotFound exception.
self.volume_type = (objects.VolumeType.get_by_id(
self._context, self.volume_type_id) if self.volume_type_id
else None)
elif attrname == 'volume_attachment':
attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
self.volume_attachment = attachments
elif attrname == 'consistencygroup':
if self.consistencygroup_id is None:
self.consistencygroup = None
consistencygroup = objects.ConsistencyGroup.get_by_id(
self._context, self.consistencygroup_id)
self.consistencygroup = consistencygroup
elif attrname == 'snapshots':
self.snapshots = objects.SnapshotList.get_all_for_volume(
elif attrname == 'cluster':
# If this volume doesn't belong to a cluster (cluster_name is
# empty), then cluster field will be None.
if self.cluster_name:
self.cluster = objects.Cluster.get_by_id(
self._context, name=self.cluster_name)
self.cluster = None
elif attrname == 'group':
if self.group_id is None: = None
group = objects.Group.get_by_id(
self._context, self.group_id) = group
def delete_metadata_key(self, key):
db.volume_metadata_delete(self._context,, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
if not md_was_changed:
def finish_volume_migration(self, dest_volume):
# We swap fields between source (i.e. self) and destination at the
# end of migration because we want to keep the original volume id
# in the DB but now pointing to the migrated volume.
skip = ({'id', 'provider_location', 'glance_metadata', 'use_quota',
'volume_type', 'volume_attachment'}
| set(self.obj_extra_fields))
for key in set(dest_volume.fields.keys()) - skip:
# Only swap attributes that are already set. We do not want to
# unexpectedly trigger a lazy-load.
if not dest_volume.obj_attr_is_set(key):
value = getattr(dest_volume, key)
value_to_dst = getattr(self, key)
# Destination must have a _name_id since the id no longer matches
# the volume. If it doesn't have a _name_id we set one.
if key == '_name_id':
if not dest_volume._name_id:
setattr(dest_volume, key,
elif key == 'migration_status':
value = None
value_to_dst = 'deleting'
elif key == 'display_description':
value_to_dst = 'migration src for ' +
elif key == 'status':
value_to_dst = 'deleting'
# Because dest_volume will be deleted soon, we can
# skip to copy volume_type_id and volume_type which
# are not keys for volume deletion.
elif key == 'volume_type_id':
# Initialize volume_type of source volume using
# new volume_type_id.
self.update({'volume_type_id': value})
setattr(self, key, value)
setattr(dest_volume, key, value_to_dst)
return dest_volume
def get_latest_snapshot(self):
"""Get volume's latest snapshot"""
snapshot_db = db.snapshot_get_latest_for_volume(self._context,
snapshot = objects.Snapshot(self._context)
return snapshot._from_db_object(self._context,
snapshot, snapshot_db)
@staticmethod
def _is_cleanable(status, obj_version):
    """Tell if a volume in this status can be cleaned up by a worker.

    :param status: volume status string
    :param obj_version: RPC object version of the requester (float or
        None for "current")
    """
    # Before 1.6 we didn't have workers table, so old services are not
    # capable of performing the cleanup.
    if obj_version and obj_version < 1.6:
        return False
    return status in ('creating', 'deleting', 'uploading', 'downloading')
def begin_attach(self, attach_mode):
attachment = objects.VolumeAttachment(
with self.obj_as_admin():
self.admin_metadata['attached_mode'] = attach_mode
return attachment
def finish_detach(self, attachment_id):
with self.obj_as_admin():
volume_updates, attachment_updates = (
db.volume_detached(self._context,, attachment_id))
self.admin_metadata.pop('attached_mode', None)
# Remove attachment in volume only when this field is loaded.
if attachment_updates and self.obj_attr_is_set('volume_attachment'):
for i, attachment in enumerate(self.volume_attachment):
if == attachment_id:
del self.volume_attachment.objects[i]
list(volume_updates.keys()) +
['volume_attachment', 'admin_metadata'])
def is_replicated(self):
    """Whether this volume's type enables replication."""
    vtype = self.volume_type
    return vtype and vtype.is_replicated()

def is_multiattach(self):
    """Whether this volume's type enables multiattach."""
    vtype = self.volume_type
    return vtype and vtype.is_multiattach()
# Don't add it as a property to avoid having to add it obj_extra_fields,
# to manager's _VOLUME_CLONE_SKIP_PROPERTIES, etc.
def is_migration_target(self):
    """True when this volume is the destination of an ongoing migration."""
    status = self.migration_status or ''
    return status.startswith('target:')
@base.CinderObjectRegistry.register
class VolumeList(base.ObjectListBase, base.CinderObject):
    VERSION = '1.1'

    fields = {
        'objects': fields.ListOfObjectsField('Volume'),
    }
@staticmethod
def include_in_cluster(context, cluster, partial_rename=True, **filters):
    """Include all volumes matching the filters into a cluster.

    When partial_rename is set we will not set the cluster_name with
    cluster parameter value directly, we'll replace provided cluster_name
    or host filter value with cluster instead.

    This is useful when we want to replace just the cluster name but leave
    the backend and pool information as it is. If we are using
    cluster_name to filter, we'll use that same DB field to replace the
    cluster value and leave the rest as it is. Likewise if we use the host
    to filter.

    Returns the number of volumes that have been changed.
    """
    return db.volume_include_in_cluster(context, cluster, partial_rename,
                                        **filters)
@classmethod
def _get_expected_attrs(cls, context, *args, **kwargs):
    """Return relationship attrs to eager-load for volume list queries."""
    expected_attrs = ['metadata', 'volume_type', 'volume_attachment']
    # Admin metadata is only readable (and thus loadable) as admin.
    if context.is_admin:
        expected_attrs.append('admin_metadata')
    return expected_attrs
@classmethod
def get_all(cls, context, marker=None, limit=None, sort_keys=None,
            sort_dirs=None, filters=None, offset=None):
    """Return a paginated/filtered list of all volumes."""
    volumes = db.volume_get_all(context, marker, limit,
                                sort_keys=sort_keys, sort_dirs=sort_dirs,
                                filters=filters, offset=offset)
    expected_attrs = cls._get_expected_attrs(context)
    return base.obj_make_list(context, cls(context), objects.Volume,
                              volumes, expected_attrs=expected_attrs)
@classmethod
def get_all_by_host(cls, context, host, filters=None):
    """Return all volumes on the given host."""
    volumes = db.volume_get_all_by_host(context, host, filters)
    expected_attrs = cls._get_expected_attrs(context)
    return base.obj_make_list(context, cls(context), objects.Volume,
                              volumes, expected_attrs=expected_attrs)
@classmethod
def get_all_by_group(cls, context, group_id, filters=None):
    """Return all volumes in the given consistency group."""
    # Consistency group
    volumes = db.volume_get_all_by_group(context, group_id, filters)
    expected_attrs = cls._get_expected_attrs(context)
    return base.obj_make_list(context, cls(context), objects.Volume,
                              volumes, expected_attrs=expected_attrs)
@classmethod
def get_all_by_generic_group(cls, context, group_id, filters=None):
    """Return all volumes in the given generic volume group."""
    # Generic volume group
    volumes = db.volume_get_all_by_generic_group(context, group_id,
                                                 filters)
    expected_attrs = cls._get_expected_attrs(context)
    return base.obj_make_list(context, cls(context), objects.Volume,
                              volumes, expected_attrs=expected_attrs)
@classmethod
def get_all_by_project(cls, context, project_id, marker=None, limit=None,
                       sort_keys=None, sort_dirs=None, filters=None,
                       offset=None):
    """Return a paginated/filtered list of the project's volumes."""
    volumes = db.volume_get_all_by_project(context, project_id, marker,
                                           limit, sort_keys=sort_keys,
                                           sort_dirs=sort_dirs,
                                           filters=filters, offset=offset)
    expected_attrs = cls._get_expected_attrs(context)
    return base.obj_make_list(context, cls(context), objects.Volume,
                              volumes, expected_attrs=expected_attrs)
@classmethod
def get_volume_summary(cls, context, project_only, filters=None):
    """Return the DB summary (counts/sizes) for matching volumes."""
    volumes = db.get_volume_summary(context, project_only, filters)
    return volumes
@classmethod
def get_all_active_by_window(cls, context, begin, end):
    """Return volumes that were active (existed) during the time window."""
    volumes = db.volume_get_all_active_by_window(context, begin, end)
    expected_attrs = cls._get_expected_attrs(context)
    return base.obj_make_list(context, cls(context), objects.Volume,
                              volumes, expected_attrs=expected_attrs)