Merge "db: Fix up some API signatures, other style issues"

This commit is contained in:
Zuul 2023-05-09 16:45:52 +00:00 committed by Gerrit Code Review
commit 2f2f98dd10
3 changed files with 373 additions and 288 deletions

View File

@ -70,7 +70,7 @@ class MessagesController(wsgi.Controller):
# Not found exception will be handled at the wsgi level
message = self.message_api.get(context, id)
context.authorize(policy.DELETE_POLICY, target_obj=message)
self.message_api.delete(context, message)
self.message_api.delete(context, id)
return webob.Response(status_int=HTTPStatus.NO_CONTENT)

View File

@ -25,15 +25,12 @@ interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
pool of available hardware (Default: True)
"""
from oslo_config import cfg
@ -97,18 +94,6 @@ def dispose_engine():
###################
def resource_exists(context, model, resource_id):
return IMPL.resource_exists(context, model, resource_id)
def get_model_for_versioned_object(versioned_object):
return IMPL.get_model_for_versioned_object(versioned_object)
def get_by_id(context, model, id, *args, **kwargs):
return IMPL.get_by_id(context, model, id, *args, **kwargs)
class Condition(object):
"""Class for normal condition values for conditional_update."""
def __init__(self, value, field=None):
@ -160,6 +145,24 @@ class Case(object):
self.else_ = else_
###################
def resource_exists(context, model, resource_id):
return IMPL.resource_exists(context, model, resource_id)
def get_model_for_versioned_object(versioned_object):
return IMPL.get_model_for_versioned_object(versioned_object)
def get_by_id(context, model, id, *args, **kwargs):
return IMPL.get_by_id(context, model, id, *args, **kwargs)
###################
def is_orm_value(obj):
"""Check if object is an ORM field."""
return IMPL.is_orm_value(obj)
@ -170,7 +173,7 @@ def conditional_update(
model,
values,
expected_values,
filters=(),
filters=None,
include_deleted='no',
project_only=False,
order=None,
@ -363,20 +366,20 @@ def cluster_create(context, values):
return IMPL.cluster_create(context, values)
def cluster_update(context, id, values):
def cluster_update(context, cluster_id, values):
"""Set the given properties on a cluster and update it.
Raises ClusterNotFound if cluster does not exist.
"""
return IMPL.cluster_update(context, id, values)
return IMPL.cluster_update(context, cluster_id, values)
def cluster_destroy(context, id):
def cluster_destroy(context, cluster_id):
"""Destroy the cluster or raise if it does not exist or has hosts.
:raise ClusterNotFound: If cluster doesn't exist.
"""
return IMPL.cluster_destroy(context, id)
return IMPL.cluster_destroy(context, cluster_id)
###############
@ -387,11 +390,25 @@ def volume_attach(context, values):
return IMPL.volume_attach(context, values)
def volume_attached(context, volume_id, instance_id, host_name, mountpoint,
attach_mode='rw', mark_attached=True):
def volume_attached(
context,
attachment_id,
instance_uuid,
host_name,
mountpoint,
attach_mode='rw',
mark_attached=True,
):
"""Ensure that a volume is set as attached."""
return IMPL.volume_attached(context, volume_id, instance_id, host_name,
mountpoint, attach_mode, mark_attached)
return IMPL.volume_attached(
context,
attachment_id,
instance_uuid,
host_name,
mountpoint,
attach_mode,
mark_attached,
)
def volume_create(context, values):
@ -623,9 +640,9 @@ def snapshot_get_all_by_host(context, host, filters=None):
return IMPL.snapshot_get_all_by_host(context, host, filters)
def snapshot_get_all_for_cgsnapshot(context, project_id):
def snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id):
"""Get all snapshots belonging to a cgsnapshot."""
return IMPL.snapshot_get_all_for_cgsnapshot(context, project_id)
return IMPL.snapshot_get_all_for_cgsnapshot(context, cgsnapshot_id)
def snapshot_get_all_for_group_snapshot(context, group_snapshot_id):
@ -833,9 +850,9 @@ def volume_type_qos_specs_get(context, type_id):
return IMPL.volume_type_qos_specs_get(context, type_id)
def volume_type_destroy(context, id):
def volume_type_destroy(context, type_id):
"""Delete a volume type."""
return IMPL.volume_type_destroy(context, id)
return IMPL.volume_type_destroy(context, type_id)
def volume_get_all_active_by_window(context, begin, end=None, project_id=None):
@ -951,9 +968,9 @@ def group_types_get_by_name_or_id(context, group_type_list):
return IMPL.group_types_get_by_name_or_id(context, group_type_list)
def group_type_destroy(context, id):
def group_type_destroy(context, type_id):
"""Delete a group type."""
return IMPL.group_type_destroy(context, id)
return IMPL.group_type_destroy(context, type_id)
def group_type_access_get_all(context, type_id):
@ -989,17 +1006,17 @@ def volume_type_extra_specs_delete(context, volume_type_id, key):
return IMPL.volume_type_extra_specs_delete(context, volume_type_id, key)
def volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs):
def volume_type_extra_specs_update_or_create(
context, volume_type_id, extra_specs,
):
"""Create or update volume type extra specs.
This adds or modifies the key/value pairs specified in the extra specs dict
argument.
"""
return IMPL.volume_type_extra_specs_update_or_create(context,
volume_type_id,
extra_specs)
return IMPL.volume_type_extra_specs_update_or_create(
context, volume_type_id, extra_specs,
)
###################
@ -1039,14 +1056,13 @@ def volume_type_encryption_delete(context, volume_type_id):
return IMPL.volume_type_encryption_delete(context, volume_type_id)
def volume_type_encryption_create(context, volume_type_id, encryption_specs):
def volume_type_encryption_create(context, volume_type_id, values):
return IMPL.volume_type_encryption_create(context, volume_type_id,
encryption_specs)
values)
def volume_type_encryption_update(context, volume_type_id, encryption_specs):
return IMPL.volume_type_encryption_update(context, volume_type_id,
encryption_specs)
def volume_type_encryption_update(context, volume_type_id, values):
return IMPL.volume_type_encryption_update(context, volume_type_id, values)
def volume_type_encryption_volume_get(context, volume_type_id):
@ -1065,9 +1081,9 @@ def qos_specs_create(context, values):
return IMPL.qos_specs_create(context, values)
def qos_specs_get(context, qos_specs_id):
def qos_specs_get(context, qos_specs_id, inactive=False):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get(context, qos_specs_id)
return IMPL.qos_specs_get(context, qos_specs_id, inactive)
def qos_specs_get_all(context, filters=None, marker=None, limit=None,
@ -1078,9 +1094,9 @@ def qos_specs_get_all(context, filters=None, marker=None, limit=None,
sort_keys=sort_keys, sort_dirs=sort_dirs)
def qos_specs_get_by_name(context, name):
def qos_specs_get_by_name(context, name, inactive=False):
"""Get all specification for a given qos_specs."""
return IMPL.qos_specs_get_by_name(context, name)
return IMPL.qos_specs_get_by_name(context, name, inactive)
def qos_specs_associations_get(context, qos_specs_id):
@ -1113,13 +1129,13 @@ def qos_specs_item_delete(context, qos_specs_id, key):
return IMPL.qos_specs_item_delete(context, qos_specs_id, key)
def qos_specs_update(context, qos_specs_id, specs):
def qos_specs_update(context, qos_specs_id, values):
"""Update qos specs.
This adds or modifies the key/value pairs specified in the
specs dict argument for a given qos_specs.
"""
return IMPL.qos_specs_update(context, qos_specs_id, specs)
return IMPL.qos_specs_update(context, qos_specs_id, values)
###################
@ -1268,9 +1284,9 @@ def quota_class_update(context, class_name, resource, limit):
return IMPL.quota_class_update(context, class_name, resource, limit)
def quota_class_update_resource(context, resource, new_resource):
def quota_class_update_resource(context, old_res, new_res):
"""Update resource name in quota_class."""
return IMPL.quota_class_update_resource(context, resource, new_resource)
return IMPL.quota_class_update_resource(context, old_res, new_res)
def quota_class_destroy(context, class_name, resource):
@ -1580,9 +1596,19 @@ def group_get_all(context, filters=None, marker=None, limit=None,
sort_dirs=sort_dirs)
def group_create(context, values, group_snapshot_id=None, group_id=None):
def group_create(
context,
values,
group_snapshot_id=None,
source_group_id=None,
):
"""Create a group from the values dictionary."""
return IMPL.group_create(context, values, group_snapshot_id, group_id)
return IMPL.group_create(
context,
values,
group_snapshot_id,
source_group_id,
)
def group_get_all_by_project(context, project_id, filters=None,

View File

@ -16,7 +16,23 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
"""Defines interface for DB access.
Functions in this module are imported into the cinder.db namespace. Call these
functions from cinder.db namespace, not the cinder.db.api namespace.
All functions in this module return objects that implement a dictionary-like
interface. Currently, many of these objects are sqlalchemy objects that
implement a dictionary interface. However, a future goal is to have all of
these objects be simple dictionaries.
**Related Flags**
:connection: string specifying the sqlalchemy connection to use, like:
`sqlite:///var/lib/cinder/cinder.sqlite`.
:enable_new_services: when adding a new service to the database, is it in the
pool of available hardware (Default: True)
"""
import collections
from collections import abc
@ -183,6 +199,9 @@ def require_context(f):
return wrapper
###################
@require_context
@main_context_manager.reader
def resource_exists(context, model, resource_id):
@ -318,131 +337,6 @@ def model_query(context, model, *args, **kwargs):
return query
def _sync_volumes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
volumes, _ = _volume_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
snapshots, _ = _snapshot_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_backups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
backups, _ = _backup_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'backups'
return {key: backups}
def _sync_gigabytes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, vol_gigs = _volume_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
_, snap_gigs = _snapshot_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
return {key: vol_gigs + snap_gigs}
def _sync_consistencygroups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, groups = _consistencygroup_data_get_for_project(context, project_id)
key = 'consistencygroups'
return {key: groups}
def _sync_backup_gigabytes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
key = 'backup_gigabytes'
_, backup_gigs = _backup_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
return {key: backup_gigs}
def _sync_groups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, groups = _group_data_get_for_project(context, project_id)
key = 'groups'
return {key: groups}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes,
'_sync_groups': _sync_groups,
}
###################
@ -697,6 +591,134 @@ def conditional_update(
###################
def _sync_volumes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
volumes, _ = _volume_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
snapshots, _ = _snapshot_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_backups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
backups, _ = _backup_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'backups'
return {key: backups}
def _sync_gigabytes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, vol_gigs = _volume_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
_, snap_gigs = _snapshot_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
return {key: vol_gigs + snap_gigs}
def _sync_consistencygroups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, groups = _consistencygroup_data_get_for_project(context, project_id)
key = 'consistencygroups'
return {key: groups}
def _sync_backup_gigabytes(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
key = 'backup_gigabytes'
_, backup_gigs = _backup_data_get_for_project(
context,
project_id,
volume_type_id=volume_type_id,
)
return {key: backup_gigs}
def _sync_groups(
context,
project_id,
volume_type_id=None,
volume_type_name=None,
):
_, groups = _group_data_get_for_project(context, project_id)
key = 'groups'
return {key: groups}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
'_sync_consistencygroups': _sync_consistencygroups,
'_sync_backups': _sync_backups,
'_sync_backup_gigabytes': _sync_backup_gigabytes,
'_sync_groups': _sync_groups,
}
###################
def _clean_filters(filters):
return {k: v for k, v in filters.items() if v is not None}
@ -1128,22 +1150,22 @@ def cluster_create(context, values):
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def cluster_update(context, id, values):
def cluster_update(context, cluster_id, values):
"""Set the given properties on a cluster and update it.
Raises ClusterNotFound if cluster does not exist.
"""
query = _cluster_query(context, id=id)
query = _cluster_query(context, id=cluster_id)
result = query.update(values)
if not result:
raise exception.ClusterNotFound(id=id)
raise exception.ClusterNotFound(id=cluster_id)
@require_admin_context
@main_context_manager.writer
def cluster_destroy(context, id):
def cluster_destroy(context, cluster_id):
"""Destroy the cluster or raise if it does not exist or has hosts."""
query = _cluster_query(context, id=id)
query = _cluster_query(context, id=cluster_id)
query = query.filter(models.Cluster.num_hosts == 0)
# If the update doesn't succeed we don't know if it's because the
# cluster doesn't exist or because it has hosts.
@ -1154,9 +1176,9 @@ def cluster_destroy(context, id):
if not result:
# This will fail if the cluster doesn't exist raising the right
# exception
cluster_get(context, id=id)
cluster_get(context, id=cluster_id)
# If it doesn't fail, then the problem is that there are hosts
raise exception.ClusterHasHosts(id=id)
raise exception.ClusterHasHosts(id=cluster_id)
###################
@ -1863,6 +1885,8 @@ def quota_destroy_by_project(context, project_id):
quota_destroy_all_by_project(context, project_id, only_quotas=True)
# TODO(stephenfin): No one is using this except 'quota_destroy_by_project'
# above, so the only_quotas=False path could be removed.
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
@ -1939,8 +1963,8 @@ def volume_attached(
instance_uuid,
host_name,
mountpoint,
attach_mode,
mark_attached,
attach_mode='rw',
mark_attached=True,
):
"""This method updates a volume attachment entry.
@ -2521,9 +2545,10 @@ def volume_attachment_get_all_by_volume_id(context, volume_id):
return result
# FIXME(jdg): Not using filters
@require_context
@main_context_manager.reader
def volume_attachment_get_all_by_host(context, host):
def volume_attachment_get_all_by_host(context, host, filters=None):
result = (
model_query(context, models.VolumeAttachment)
.filter_by(attached_host=host)
@ -2544,9 +2569,14 @@ def volume_attachment_get(context, attachment_id):
return _attachment_get(context, attachment_id)
# FIXME(jdg): Not using filters
@require_context
@main_context_manager.reader
def volume_attachment_get_all_by_instance_uuid(context, instance_uuid):
def volume_attachment_get_all_by_instance_uuid(
context,
instance_uuid,
filters=None,
):
"""Fetch all attachment records associated with the specified instance."""
result = (
model_query(context, models.VolumeAttachment)
@ -3576,7 +3606,12 @@ def volume_metadata_get(context, volume_id):
@require_volume_exists
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_metadata_delete(context, volume_id, key, meta_type):
def volume_metadata_delete(
context,
volume_id,
key,
meta_type=common.METADATA_TYPES.user,
):
if meta_type == common.METADATA_TYPES.user:
query = _volume_user_metadata_get_query(context, volume_id).filter_by(
key=key
@ -3614,7 +3649,13 @@ def volume_metadata_delete(context, volume_id, key, meta_type):
@handle_db_data_error
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_metadata_update(context, volume_id, metadata, delete, meta_type):
def volume_metadata_update(
context,
volume_id,
metadata,
delete,
meta_type=common.METADATA_TYPES.user,
):
if meta_type == common.METADATA_TYPES.user:
return _volume_user_metadata_update(
context, volume_id, metadata, delete
@ -4795,7 +4836,15 @@ def _group_type_get(context, id, inactive=False, expected_fields=None):
@require_context
@main_context_manager.reader
def volume_type_get(context, id, inactive=False, expected_fields=None):
"""Return a dict describing specific volume_type."""
"""Get volume type by id.
:param context: context to query under
:param id: Volume type id to get.
:param inactive: Consider inactive volume types when searching
:param expected_fields: Return those additional fields.
Supported fields are: projects.
:returns: volume type
"""
return _volume_type_get(
context, id, inactive=inactive, expected_fields=expected_fields
@ -5035,15 +5084,17 @@ def volume_type_qos_specs_get(context, type_id):
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def volume_type_destroy(context, id):
def volume_type_destroy(context, type_id):
utcnow = timeutils.utcnow()
vol_types = volume_type_get_all(context)
if len(vol_types) <= 1:
raise exception.VolumeTypeDeletionError(volume_type_id=id)
_volume_type_get(context, id)
raise exception.VolumeTypeDeletionError(volume_type_id=type_id)
_volume_type_get(context, type_id)
results = (
model_query(context, models.Volume).filter_by(volume_type_id=id).all()
model_query(context, models.Volume)
.filter_by(volume_type_id=type_id)
.all()
)
group_count = (
@ -5052,7 +5103,7 @@ def volume_type_destroy(context, id):
models.GroupVolumeTypeMapping,
read_deleted="no",
)
.filter_by(volume_type_id=id)
.filter_by(volume_type_id=type_id)
.count()
)
cg_count = (
@ -5060,14 +5111,14 @@ def volume_type_destroy(context, id):
context,
models.ConsistencyGroup,
)
.filter(models.ConsistencyGroup.volume_type_id.contains(id))
.filter(models.ConsistencyGroup.volume_type_id.contains(type_id))
.count()
)
if results or group_count or cg_count:
LOG.error('VolumeType %s deletion failed, VolumeType in use.', id)
raise exception.VolumeTypeInUse(volume_type_id=id)
LOG.error('VolumeType %s deletion failed, VolumeType in use.', type_id)
raise exception.VolumeTypeInUse(volume_type_id=type_id)
query = model_query(context, models.VolumeType).filter_by(id=id)
query = model_query(context, models.VolumeType).filter_by(id=type_id)
entity = query.column_descriptions[0]['entity']
updated_values = {
'deleted': True,
@ -5079,7 +5130,7 @@ def volume_type_destroy(context, id):
query = model_query(
context,
models.VolumeTypeExtraSpecs,
).filter_by(volume_type_id=id)
).filter_by(volume_type_id=type_id)
entity = query.column_descriptions[0]['entity']
query.update(
{
@ -5090,7 +5141,7 @@ def volume_type_destroy(context, id):
)
query = model_query(context, models.Encryption).filter_by(
volume_type_id=id
volume_type_id=type_id
)
entity = query.column_descriptions[0]['entity']
query.update(
@ -5103,7 +5154,7 @@ def volume_type_destroy(context, id):
model_query(
context, models.VolumeTypeProjects, read_deleted="int_no"
).filter_by(volume_type_id=id).soft_delete(synchronize_session=False)
).filter_by(volume_type_id=type_id).soft_delete(synchronize_session=False)
del updated_values['updated_at']
return updated_values
@ -5111,16 +5162,18 @@ def volume_type_destroy(context, id):
@require_admin_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def group_type_destroy(context, id):
_group_type_get(context, id)
def group_type_destroy(context, type_id):
_group_type_get(context, type_id)
results = (
model_query(context, models.Group).filter_by(group_type_id=id).all()
model_query(context, models.Group)
.filter_by(group_type_id=type_id)
.all()
)
if results:
LOG.error('GroupType %s deletion failed, ' 'GroupType in use.', id)
raise exception.GroupTypeInUse(group_type_id=id)
LOG.error('GroupType %s deletion failed, GroupType in use.', type_id)
raise exception.GroupTypeInUse(group_type_id=type_id)
query = model_query(context, models.GroupType).filter_by(id=id)
query = model_query(context, models.GroupType).filter_by(id=type_id)
entity = query.column_descriptions[0]['entity']
query.update(
{
@ -5131,7 +5184,7 @@ def group_type_destroy(context, id):
)
query = model_query(context, models.GroupTypeSpecs).filter_by(
group_type_id=id
group_type_id=type_id
)
entity = query.column_descriptions[0]['entity']
query.update(
@ -5441,9 +5494,13 @@ def _volume_type_extra_specs_get_item(context, volume_type_id, key):
@handle_db_data_error
@require_context
@main_context_manager.writer
def volume_type_extra_specs_update_or_create(context, volume_type_id, specs):
def volume_type_extra_specs_update_or_create(
context,
volume_type_id,
extra_specs,
):
spec_ref = None
for key, value in specs.items():
for key, value in extra_specs.items():
try:
spec_ref = _volume_type_extra_specs_get_item(
context,
@ -5463,7 +5520,7 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, specs):
)
spec_ref.save(context.session)
return specs
return extra_specs
####################
@ -5524,9 +5581,9 @@ def _group_type_specs_get_item(context, group_type_id, key):
@handle_db_data_error
@require_context
@main_context_manager.writer
def group_type_specs_update_or_create(context, group_type_id, specs):
def group_type_specs_update_or_create(context, group_type_id, group_specs):
spec_ref = None
for key, value in specs.items():
for key, value in group_specs.items():
try:
spec_ref = _group_type_specs_get_item(context, group_type_id, key)
except exception.GroupTypeSpecsNotFound:
@ -5541,7 +5598,7 @@ def group_type_specs_update_or_create(context, group_type_id, specs):
)
spec_ref.save(context.session)
return specs
return group_specs
####################
@ -5915,18 +5972,18 @@ def _qos_specs_get_item(context, qos_specs_id, key):
@require_admin_context
@require_qos_specs_exists
@main_context_manager.writer
def qos_specs_update(context, qos_specs_id, updates):
def qos_specs_update(context, qos_specs_id, values):
"""Make updates to an existing qos specs.
Perform add, update or delete key/values to a qos specs.
"""
specs = updates.get('specs', {})
specs = values.get('specs', {})
if 'consumer' in updates:
if 'consumer' in values:
# Massage consumer to the right place for DB and copy specs
# before updating so we don't modify dict for caller
specs = specs.copy()
specs['consumer'] = updates['consumer']
specs['consumer'] = values['consumer']
spec_ref = None
for key in specs.keys():
try:
@ -8254,12 +8311,9 @@ def message_create(context, values):
@require_admin_context
@main_context_manager.writer
def message_destroy(context, message):
def message_destroy(context, message_id):
now = timeutils.utcnow()
query = model_query(
context,
models.Message,
).filter_by(id=message.get('id'))
query = model_query(context, models.Message).filter_by(id=message_id)
entity = query.column_descriptions[0]['entity']
updated_values = {
'deleted': True,
@ -8326,64 +8380,6 @@ def driver_initiator_data_get(context, initiator, namespace):
###############################
PAGINATION_HELPERS = {
models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
models.QualityOfServiceSpecs: (
_qos_specs_get_query,
_process_qos_specs_filters,
_qos_specs_get,
),
models.VolumeType: (
_volume_type_get_query,
_process_volume_types_filters,
_volume_type_get_db_object,
),
models.ConsistencyGroup: (
_consistencygroups_get_query,
_process_consistencygroups_filters,
_consistencygroup_get,
),
models.Message: (
_messages_get_query,
_process_messages_filters,
_message_get,
),
models.GroupType: (
_group_type_get_query,
_process_group_types_filters,
_group_type_get_db_object,
),
models.Group: (_groups_get_query, _process_groups_filters, _group_get),
models.GroupSnapshot: (
_group_snapshot_get_query,
_process_group_snapshot_filters,
_group_snapshot_get,
),
models.VolumeAttachment: (
_attachment_get_query,
_process_attachment_filters,
_attachment_get,
),
models.Transfer: (
_transfer_get_query,
_process_transfer_filters,
_transfer_get,
),
}
CALCULATE_COUNT_HELPERS = {
'volume': (_volume_get_query, _process_volume_filters),
'snapshot': (_snaps_get_query, _process_snaps_filters),
'backup': (_backups_get_query, _process_backups_filters),
}
###############################
@require_context
@main_context_manager.writer
def image_volume_cache_create(
@ -8550,9 +8546,14 @@ def worker_get(context, **filters):
@require_context
@main_context_manager.reader
def worker_get_all(context, **filters):
def worker_get_all(context, until=None, db_filters=None, **filters):
"""Get all workers that match given criteria."""
query = _worker_query(context, **filters)
query = _worker_query(
context,
until=until,
db_filters=db_filters,
**filters,
)
return query.all() if query else []
@ -8704,3 +8705,61 @@ def use_quota_online_data_migration(
# models.VolumeAdminMetadata.delete_values)
#
# return total, updated
###############################
PAGINATION_HELPERS = {
models.Volume: (_volume_get_query, _process_volume_filters, _volume_get),
models.Snapshot: (_snaps_get_query, _process_snaps_filters, _snapshot_get),
models.Backup: (_backups_get_query, _process_backups_filters, _backup_get),
models.QualityOfServiceSpecs: (
_qos_specs_get_query,
_process_qos_specs_filters,
_qos_specs_get,
),
models.VolumeType: (
_volume_type_get_query,
_process_volume_types_filters,
_volume_type_get_db_object,
),
models.ConsistencyGroup: (
_consistencygroups_get_query,
_process_consistencygroups_filters,
_consistencygroup_get,
),
models.Message: (
_messages_get_query,
_process_messages_filters,
_message_get,
),
models.GroupType: (
_group_type_get_query,
_process_group_types_filters,
_group_type_get_db_object,
),
models.Group: (_groups_get_query, _process_groups_filters, _group_get),
models.GroupSnapshot: (
_group_snapshot_get_query,
_process_group_snapshot_filters,
_group_snapshot_get,
),
models.VolumeAttachment: (
_attachment_get_query,
_process_attachment_filters,
_attachment_get,
),
models.Transfer: (
_transfer_get_query,
_process_transfer_filters,
_transfer_get,
),
}
CALCULATE_COUNT_HELPERS = {
'volume': (_volume_get_query, _process_volume_filters),
'snapshot': (_snaps_get_query, _process_snaps_filters),
'backup': (_backups_get_query, _process_backups_filters),
}