Update Versioned Objects with Cluster object

This patch adds Cluster Versioned Object as counterpart for the Cluster
ORM class, and updates ConsistencyGroup, Volume, and Service Versioned
Objects.

Specs: https://review.openstack.org/327283
Implements: blueprint cinder-volume-active-active-support
Change-Id: Ie6857cf7db52cf284d89bad704e2c679e5549966
This commit is contained in:
Gorka Eguileor 2016-05-23 14:22:33 +02:00
parent 57ea6967bf
commit 625cab15b0
16 changed files with 672 additions and 31 deletions

View File

@ -26,6 +26,7 @@ def register_all():
# need to receive it via RPC.
__import__('cinder.objects.backup')
__import__('cinder.objects.cgsnapshot')
__import__('cinder.objects.cluster')
__import__('cinder.objects.consistencygroup')
__import__('cinder.objects.qos_specs')
__import__('cinder.objects.service')

View File

@ -103,6 +103,9 @@ OBJ_VERSIONS.add('1.5', {'VolumeType': '1.1'})
OBJ_VERSIONS.add('1.6', {'QualityOfServiceSpecs': '1.0',
'QualityOfServiceSpecsList': '1.0',
'VolumeType': '1.2'})
OBJ_VERSIONS.add('1.7', {'Cluster': '1.0', 'ClusterList': '1.0',
'Service': '1.4', 'Volume': '1.4',
'ConsistencyGroup': '1.3'})
class CinderObjectRegistry(base.VersionedObjectRegistry):
@ -262,7 +265,7 @@ class CinderPersistentObject(object):
self._context = original_context
@classmethod
def _get_expected_attrs(cls, context):
def _get_expected_attrs(cls, context, *args, **kwargs):
return None
@classmethod
@ -274,9 +277,10 @@ class CinderPersistentObject(object):
(cls.obj_name()))
raise NotImplementedError(msg)
model = db.get_model_for_versioned_object(cls)
orm_obj = db.get_by_id(context, model, id, *args, **kwargs)
orm_obj = db.get_by_id(context, cls.model, id, *args, **kwargs)
expected_attrs = cls._get_expected_attrs(context)
# We pass parameters because fields to expect may depend on them
expected_attrs = cls._get_expected_attrs(context, *args, **kwargs)
kargs = {}
if expected_attrs:
kargs = {'expected_attrs': expected_attrs}
@ -417,8 +421,7 @@ class CinderPersistentObject(object):
@classmethod
def exists(cls, context, id_):
model = db.get_model_for_versioned_object(cls)
return db.resource_exists(context, model, id_)
return db.resource_exists(context, cls.model, id_)
class CinderComparableObject(base.ComparableVersionedObject):
@ -438,6 +441,12 @@ class ObjectListBase(base.ObjectListBase):
target_version)
class ClusteredObject(object):
@property
def service_topic_queue(self):
return self.cluster_name or self.host
class CinderObjectSerializer(base.VersionedObjectSerializer):
OBJ_BASE_CLASS = CinderObject

View File

@ -40,6 +40,10 @@ class CGSnapshot(base.CinderPersistentObject, base.CinderObject,
'snapshots': fields.ObjectField('SnapshotList', nullable=True),
}
@property
def service_topic_queue(self):
return self.consistencygroup.service_topic_queue
@classmethod
def _from_db_object(cls, context, cgsnapshot, db_cgsnapshots,
expected_attrs=None):

190
cinder/objects/cluster.py Normal file
View File

@ -0,0 +1,190 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder import utils
CONF = cfg.CONF
@base.CinderObjectRegistry.register
class Cluster(base.CinderPersistentObject, base.CinderObject,
              base.CinderComparableObject):
    """Cluster Versioned Object.

    Method get_by_id supports as additional named arguments:
    - get_services: If we want to load all services from this cluster.
    - services_summary: If we want to load num_hosts and num_down_hosts
      fields.
    - is_up: Boolean value to filter based on the cluster's up status.
    - read_deleted: Filtering based on delete status. Default value "no".
    - Any other cluster field will be used as a filter.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'

    # Fields that are only filled in when explicitly requested via
    # expected_attrs, since loading them requires extra DB work.
    OPTIONAL_FIELDS = ('num_hosts', 'num_down_hosts', 'services')

    # NOTE(geguileo): We don't want to expose race_preventer field at the OVO
    # layer since it is only meant for the DB layer internal mechanism to
    # prevent races.
    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=False),
        'binary': fields.StringField(nullable=False),
        'disabled': fields.BooleanField(default=False, nullable=True),
        'disabled_reason': fields.StringField(nullable=True),
        'num_hosts': fields.IntegerField(default=0, read_only=True),
        'num_down_hosts': fields.IntegerField(default=0, read_only=True),
        'last_heartbeat': fields.DateTimeField(nullable=True, read_only=True),
        'services': fields.ObjectField('ServiceList', nullable=True,
                                       read_only=True),
    }

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        """Return expected attributes when getting a cluster.

        Expected attributes depend on whether we are retrieving all related
        services as well as if we are getting the services summary.
        """
        expected_attrs = []
        if kwargs.get('get_services'):
            expected_attrs.append('services')
        if kwargs.get('services_summary'):
            expected_attrs.extend(('num_hosts', 'num_down_hosts'))
        return expected_attrs

    @staticmethod
    def _from_db_object(context, cluster, db_cluster, expected_attrs=None):
        """Fill cluster OVO fields from cluster ORM instance.

        :param context: request context, stored on the resulting OVO.
        :param cluster: Cluster OVO instance to populate in place.
        :param db_cluster: ORM row (or row-like object) to read values from.
        :param expected_attrs: optional fields that should also be copied.
        :returns: the populated ``cluster`` with its changes reset.
        """
        expected_attrs = expected_attrs or tuple()
        for name, field in cluster.fields.items():
            # The only field that cannot be assigned using setattr is services,
            # because it is an ObjectField. So we don't assign the value if
            # it's a non expected optional field or if it's services field.
            if ((name in Cluster.OPTIONAL_FIELDS
                 and name not in expected_attrs) or name == 'services'):
                continue
            value = getattr(db_cluster, name)
            setattr(cluster, name, value)
        cluster._context = context
        if 'services' in expected_attrs:
            # Build the ServiceList OVO from the joined ORM services.
            cluster.services = base.obj_make_list(
                context,
                objects.ServiceList(context),
                objects.Service,
                db_cluster.services)
        cluster.obj_reset_changes()
        return cluster

    def obj_load_attr(self, attrname):
        """Lazy load services attribute."""
        # NOTE(geguileo): We only allow lazy loading services to raise
        # awareness of the high cost of lazy loading num_hosts and
        # num_down_hosts, so if we are going to need this information we should
        # be certain we really need it and it should loaded when retrieving the
        # data from the DB the first time we read the OVO.
        if attrname != 'services':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        self.services = objects.ServiceList.get_all(
            self._context, {'cluster_name': self.name})
        self.obj_reset_changes(fields=('services',))

    def create(self):
        """Persist this cluster in the DB.

        Raises ObjectActionError if the object was already created or if any
        read-only optional field (num_hosts, num_down_hosts, services) has
        been assigned.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()
        if updates:
            for field in self.OPTIONAL_FIELDS:
                if field in updates:
                    raise exception.ObjectActionError(
                        action='create', reason=_('%s assigned') % field)
        db_cluster = db.cluster_create(self._context, updates)
        self._from_db_object(self._context, self, db_cluster)

    def save(self):
        """Store changed fields in the DB; optional fields may not change."""
        updates = self.cinder_obj_get_changes()
        if updates:
            for field in self.OPTIONAL_FIELDS:
                if field in updates:
                    raise exception.ObjectActionError(
                        action='save', reason=_('%s changed') % field)
            db.cluster_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        """Delete the cluster from the DB and sync deletion fields back."""
        # Destroy is done with elevated privileges.
        with self.obj_as_admin():
            updated_values = db.cluster_destroy(self._context, self.id)
        # Reflect the DB-side changes (e.g. deleted markers) on this OVO.
        for field, value in updated_values.items():
            setattr(self, field, value)
        self.obj_reset_changes(updated_values.keys())

    def is_up(self):
        """Return True if the last heartbeat is newer than the expiry cutoff

        computed by utils.service_expired_time; False when there is no
        heartbeat at all.
        """
        return (self.last_heartbeat and
                self.last_heartbeat >= utils.service_expired_time(True))
@base.CinderObjectRegistry.register
class ClusterList(base.ObjectListBase, base.CinderObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {'objects': fields.ListOfObjectsField('Cluster')}

    @classmethod
    def get_all(cls, context, is_up=None, get_services=False,
                services_summary=False, read_deleted='no', **filters):
        """Get all clusters that match the criteria.

        :param is_up: Boolean value to filter based on the cluster's up
                      status.
        :param get_services: If we want to load all services from this
                             cluster.
        :param services_summary: If we want to load num_hosts and
                                 num_down_hosts fields.
        :param read_deleted: Filtering based on delete status. Default value
                             is "no".
        :param filters: Field based filters in the form of key/value.
        """
        # Which optional attributes to fill in depends on the flags above.
        expected_attrs = Cluster._get_expected_attrs(
            context,
            get_services=get_services,
            services_summary=services_summary)
        db_clusters = db.cluster_get_all(context, is_up=is_up,
                                         get_services=get_services,
                                         services_summary=services_summary,
                                         read_deleted=read_deleted,
                                         **filters)
        return base.obj_make_list(context, cls(context), Cluster,
                                  db_clusters, expected_attrs=expected_attrs)

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from cinder import db
from cinder import exception
from cinder.i18n import _
@ -23,18 +25,22 @@ from oslo_versionedobjects import fields
@base.CinderObjectRegistry.register
class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat):
base.CinderObjectDictCompat, base.ClusteredObject):
# Version 1.0: Initial version
# Version 1.1: Added cgsnapshots and volumes relationships
# Version 1.2: Changed 'status' field to use ConsistencyGroupStatusField
VERSION = '1.2'
# Version 1.3: Added cluster fields
VERSION = '1.3'
OPTIONAL_FIELDS = ['cgsnapshots', 'volumes']
OPTIONAL_FIELDS = ('cgsnapshots', 'volumes', 'cluster')
fields = {
'id': fields.UUIDField(),
'user_id': fields.StringField(),
'project_id': fields.StringField(),
'cluster_name': fields.StringField(nullable=True),
'cluster': fields.ObjectField('Cluster', nullable=True,
read_only=True),
'host': fields.StringField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'name': fields.StringField(nullable=True),
@ -47,6 +53,18 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
'volumes': fields.ObjectField('VolumeList', nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
"""Make a CG representation compatible with a target version."""
# Convert all related objects
super(ConsistencyGroup, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
# Before v1.3 we didn't have cluster fields so we have to remove them.
if target_version < (1, 3):
for obj_field in ('cluster', 'cluster_name'):
primitive.pop(obj_field, None)
@classmethod
def _from_db_object(cls, context, consistencygroup, db_consistencygroup,
expected_attrs=None):
@ -72,6 +90,18 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
db_consistencygroup['volumes'])
consistencygroup.volumes = volumes
if 'cluster' in expected_attrs:
db_cluster = db_consistencygroup.get('cluster')
# If this consistency group doesn't belong to a cluster the cluster
# field in the ORM instance will have value of None.
if db_cluster:
consistencygroup.cluster = objects.Cluster(context)
objects.Cluster._from_db_object(context,
consistencygroup.cluster,
db_cluster)
else:
consistencygroup.cluster = None
consistencygroup._context = context
consistencygroup.obj_reset_changes()
return consistencygroup
@ -96,6 +126,10 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
raise exception.ObjectActionError(action='create',
reason=_('volumes assigned'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
db_consistencygroups = db.consistencygroup_create(self._context,
updates,
cg_snap_id,
@ -119,6 +153,15 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
self.volumes = objects.VolumeList.get_all_by_group(self._context,
self.id)
# If this consistency group doesn't belong to a cluster (cluster_name
# is empty), then cluster field will be None.
if attrname == 'cluster':
if self.cluster_name:
self.cluster = objects.Cluster.get_by_id(
self._context, name=self.cluster_name)
else:
self.cluster = None
self.obj_reset_changes(fields=[attrname])
def save(self):
@ -130,6 +173,9 @@ class ConsistencyGroup(base.CinderPersistentObject, base.CinderObject,
if 'volumes' in updates:
raise exception.ObjectActionError(
action='save', reason=_('volumes changed'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cluster changed'))
db.consistencygroup_update(self._context, self.id, updates)
self.obj_reset_changes()
@ -152,6 +198,26 @@ class ConsistencyGroupList(base.ObjectListBase, base.CinderObject):
'objects': fields.ListOfObjectsField('ConsistencyGroup')
}
    @staticmethod
    def include_in_cluster(context, cluster, partial_rename=True, **filters):
        """Include all consistency groups matching the filters into a cluster.

        When partial_rename is set we will not set the cluster_name with
        cluster parameter value directly, we'll replace provided cluster_name
        or host filter value with cluster instead.

        This is useful when we want to replace just the cluster name but leave
        the backend and pool information as it is. If we are using
        cluster_name to filter, we'll use that same DB field to replace the
        cluster value and leave the rest as it is. Likewise if we use the host
        to filter.

        :param context: request context.
        :param cluster: name of the cluster to assign matching CGs to.
        :param partial_rename: replace only the cluster-name portion of the
                               filtered field instead of the whole value.
        :param filters: DB field/value filters selecting the CGs to include.
        :returns: the number of consistency groups that have been changed.
        """
        return db.consistencygroup_include_in_cluster(context, cluster,
                                                      partial_rename,
                                                      **filters)
@classmethod
def get_all(cls, context, filters=None, marker=None, limit=None,
offset=None, sort_keys=None, sort_dirs=None):

View File

@ -25,18 +25,24 @@ from cinder.objects import fields as c_fields
@base.CinderObjectRegistry.register
class Service(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat,
base.CinderComparableObject):
base.CinderObjectDictCompat, base.CinderComparableObject,
base.ClusteredObject):
# Version 1.0: Initial version
# Version 1.1: Add rpc_current_version and object_current_version fields
# Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version()
# Version 1.3: Add replication fields
VERSION = '1.3'
# Version 1.4: Add cluster fields
VERSION = '1.4'
OPTIONAL_FIELDS = ('cluster',)
fields = {
'id': fields.IntegerField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'cluster_name': fields.StringField(nullable=True),
'cluster': fields.ObjectField('Cluster', nullable=True,
read_only=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(default=0),
'disabled': fields.BooleanField(default=False, nullable=True),
@ -54,9 +60,23 @@ class Service(base.CinderPersistentObject, base.CinderObject,
'active_backend_id': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
"""Make a service representation compatible with a target version."""
# Convert all related objects
super(Service, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
# Before v1.4 we didn't have cluster fields so we have to remove them.
if target_version < (1, 4):
for obj_field in ('cluster', 'cluster_name'):
primitive.pop(obj_field, None)
@staticmethod
def _from_db_object(context, service, db_service):
def _from_db_object(context, service, db_service, expected_attrs=None):
expected_attrs = expected_attrs or []
for name, field in service.fields.items():
if name in Service.OPTIONAL_FIELDS:
continue
value = db_service.get(name)
if isinstance(field, fields.IntegerField):
value = value or 0
@ -65,9 +85,40 @@ class Service(base.CinderPersistentObject, base.CinderObject,
service[name] = value
service._context = context
if 'cluster' in expected_attrs:
db_cluster = db_service.get('cluster')
# If this service doesn't belong to a cluster the cluster field in
# the ORM instance will have value of None.
if db_cluster:
service.cluster = objects.Cluster(context)
objects.Cluster._from_db_object(context, service.cluster,
db_cluster)
else:
service.cluster = None
service.obj_reset_changes()
return service
def obj_load_attr(self, attrname):
if attrname not in self.OPTIONAL_FIELDS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
# NOTE(geguileo): We only have 1 optional field, so we don't need to
# confirm that we are loading the cluster.
# If this service doesn't belong to a cluster (cluster_name is empty),
# then cluster field will be None.
if self.cluster_name:
self.cluster = objects.Cluster.get_by_id(self._context,
name=self.cluster_name)
else:
self.cluster = None
self.obj_reset_changes(fields=(attrname,))
@classmethod
def get_by_host_and_topic(cls, context, host, topic):
db_service = db.service_get(context, disabled=False, host=host,
@ -84,11 +135,17 @@ class Service(base.CinderPersistentObject, base.CinderObject,
raise exception.ObjectActionError(action='create',
reason=_('already created'))
updates = self.cinder_obj_get_changes()
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
def save(self):
updates = self.cinder_obj_get_changes()
if 'cluster' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cluster changed'))
if updates:
db.service_update(self._context, self.id, updates)
self.obj_reset_changes()

View File

@ -65,8 +65,12 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True),
}
@property
def service_topic_queue(self):
return self.volume.service_topic_queue
@classmethod
def _get_expected_attrs(cls, context):
def _get_expected_attrs(cls, context, *args, **kwargs):
return 'metadata',
# NOTE(thangp): obj_extra_fields is used to hold properties that are not
@ -151,6 +155,9 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
if 'cgsnapshot' in updates:
raise exception.ObjectActionError(action='create',
reason=_('cgsnapshot assigned'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
db_snapshot = db.snapshot_create(self._context, updates)
self._from_db_object(self._context, self, db_snapshot)
@ -165,6 +172,10 @@ class Snapshot(base.CinderPersistentObject, base.CinderObject,
raise exception.ObjectActionError(
action='save', reason=_('cgsnapshot changed'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cluster changed'))
if 'metadata' in updates:
# Metadata items that are not specified in the
# self.metadata will be deleted

View File

@ -49,17 +49,19 @@ class MetadataObject(dict):
@base.CinderObjectRegistry.register
class Volume(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat, base.CinderComparableObject):
base.CinderObjectDictCompat, base.CinderComparableObject,
base.ClusteredObject):
# Version 1.0: Initial version
# Version 1.1: Added metadata, admin_metadata, volume_attachment, and
# volume_type
# Version 1.2: Added glance_metadata, consistencygroup and snapshots
# Version 1.3: Added finish_volume_migration()
VERSION = '1.3'
# Version 1.4: Added cluster fields
VERSION = '1.4'
OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
'volume_type', 'volume_attachment', 'consistencygroup',
'snapshots')
'snapshots', 'cluster')
fields = {
'id': fields.UUIDField(),
@ -70,6 +72,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
'snapshot_id': fields.UUIDField(nullable=True),
'cluster_name': fields.StringField(nullable=True),
'cluster': fields.ObjectField('Cluster', nullable=True,
read_only=True),
'host': fields.StringField(nullable=True),
'size': fields.IntegerField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
@ -122,7 +127,7 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
'volume_admin_metadata', 'volume_glance_metadata']
@classmethod
def _get_expected_attrs(cls, context):
def _get_expected_attrs(cls, context, *args, **kwargs):
expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
if context.is_admin:
expected_attrs.append('admin_metadata')
@ -221,9 +226,15 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
return changes
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version."""
"""Make a Volume representation compatible with a target version."""
# Convert all related objects
super(Volume, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
# Before v1.4 we didn't have cluster fields so we have to remove them.
if target_version < (1, 4):
for obj_field in ('cluster', 'cluster_name'):
primitive.pop(obj_field, None)
@classmethod
def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
@ -277,6 +288,16 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
objects.Snapshot,
db_volume['snapshots'])
volume.snapshots = snapshots
if 'cluster' in expected_attrs:
db_cluster = db_volume.get('cluster')
# If this volume doesn't belong to a cluster the cluster field in
# the ORM instance will have value of None.
if db_cluster:
volume.cluster = objects.Cluster(context)
objects.Cluster._from_db_object(context, volume.cluster,
db_cluster)
else:
volume.cluster = None
volume._context = context
volume.obj_reset_changes()
@ -294,6 +315,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
if 'snapshots' in updates:
raise exception.ObjectActionError(
action='create', reason=_('snapshots assigned'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
db_volume = db.volume_create(self._context, updates)
self._from_db_object(self._context, self, db_volume)
@ -310,6 +334,9 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
if 'snapshots' in updates:
raise exception.ObjectActionError(
action='save', reason=_('snapshots changed'))
if 'cluster' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cluster changed'))
if 'metadata' in updates:
# Metadata items that are not specified in the
# self.metadata will be deleted
@ -375,6 +402,14 @@ class Volume(base.CinderPersistentObject, base.CinderObject,
elif attrname == 'snapshots':
self.snapshots = objects.SnapshotList.get_all_for_volume(
self._context, self.id)
elif attrname == 'cluster':
# If this volume doesn't belong to a cluster (cluster_name is
# empty), then cluster field will be None.
if self.cluster_name:
self.cluster = objects.Cluster.get_by_id(
self._context, name=self.cluster_name)
else:
self.cluster = None
self.obj_reset_changes(fields=[attrname])
@ -440,8 +475,27 @@ class VolumeList(base.ObjectListBase, base.CinderObject):
'objects': fields.ListOfObjectsField('Volume'),
}
    @staticmethod
    def include_in_cluster(context, cluster, partial_rename=True, **filters):
        """Include all volumes matching the filters into a cluster.

        When partial_rename is set we will not set the cluster_name with
        cluster parameter value directly, we'll replace provided cluster_name
        or host filter value with cluster instead.

        This is useful when we want to replace just the cluster name but leave
        the backend and pool information as it is. If we are using
        cluster_name to filter, we'll use that same DB field to replace the
        cluster value and leave the rest as it is. Likewise if we use the host
        to filter.

        :param context: request context.
        :param cluster: name of the cluster to assign matching volumes to.
        :param partial_rename: replace only the cluster-name portion of the
                               filtered field instead of the whole value.
        :param filters: DB field/value filters selecting the volumes.
        :returns: the number of volumes that have been changed.
        """
        return db.volume_include_in_cluster(context, cluster, partial_rename,
                                            **filters)
@classmethod
def _get_expected_attrs(cls, context):
def _get_expected_attrs(cls, context, *args, **kwargs):
expected_attrs = ['metadata', 'volume_type']
if context.is_admin:
expected_attrs.append('admin_metadata')

View File

@ -59,7 +59,7 @@ class VolumeType(base.CinderPersistentObject, base.CinderObject,
primitive['extra_specs'][k] = ''
@classmethod
def _get_expected_attrs(cls, context):
def _get_expected_attrs(cls, context, *args, **kwargs):
return 'extra_specs', 'projects'
@staticmethod

View File

@ -0,0 +1,70 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from oslo_versionedobjects import fields
from cinder.db.sqlalchemy import models
from cinder import objects
def cluster_basic_fields():
    """Return the minimal set of DB column values for a fake cluster row."""
    creation_time = timeutils.utcnow(with_timezone=False)
    return dict(id=1,
                created_at=creation_time,
                deleted=False,
                name='cluster_name',
                binary='cinder-volume',
                race_preventer=0)
def fake_cluster_orm(**updates):
    """Create a fake ORM cluster instance."""
    # Build the full fake row first; the ORM model constructor does not
    # accept the 'services' relationship as a keyword, so drop it.
    values = fake_db_cluster(**updates)
    values.pop('services')
    return models.Cluster(**values)
def fake_db_cluster(**updates):
    """Helper method for fake_cluster_orm.

    Creates a complete dictionary filling missing fields based on the Cluster
    field definition (defaults and nullable).

    :param updates: values to override the generated defaults with.
    :returns: dict with one entry per Cluster OVO field plus the basic
              DB-only columns from cluster_basic_fields().
    :raises ValueError: if a Cluster field has neither a default nor is
                        nullable, i.e. this helper needs to be updated.
    """
    db_cluster = cluster_basic_fields()
    for name, field in objects.Cluster.fields.items():
        if name in db_cluster:
            continue
        if field.default != fields.UnspecifiedDefault:
            db_cluster[name] = field.default
        elif field.nullable:
            db_cluster[name] = None
        else:
            # A required field was added to the OVO without teaching this
            # fake generator how to fill it; fail loudly with a specific
            # exception type instead of a bare Exception.
            raise ValueError('fake_db_cluster needs help with %s.' % name)
    if updates:
        db_cluster.update(updates)
    return db_cluster
def fake_cluster_ovo(context, **updates):
    """Create a fake Cluster versioned object."""
    orm_cluster = fake_cluster_orm(**updates)
    ovo = objects.Cluster._from_db_object(context, objects.Cluster(),
                                          orm_cluster)
    return ovo

View File

@ -45,11 +45,12 @@ class BaseObjectsTestCase(test.TestCase):
# base class" error
continue
if field in ('modified_at', 'created_at',
'updated_at', 'deleted_at') and db[field]:
obj_field = getattr(obj, field)
if field in ('modified_at', 'created_at', 'updated_at',
'deleted_at', 'last_heartbeat') and db[field]:
test.assertEqual(db[field],
timeutils.normalize_time(obj[field]))
elif isinstance(obj[field], obj_base.ObjectListBase):
test.assertEqual(db[field], obj[field].objects)
timeutils.normalize_time(obj_field))
elif isinstance(obj_field, obj_base.ObjectListBase):
test.assertEqual(db[field], obj_field.objects)
else:
test.assertEqual(db[field], obj[field])
test.assertEqual(db[field], obj_field)

View File

@ -0,0 +1,135 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_utils import timeutils
from cinder import objects
from cinder.tests.unit import fake_cluster
from cinder.tests.unit import objects as test_objects
from cinder import utils
def _get_filters_sentinel():
return {'session': mock.sentinel.session,
'name_match_level': mock.sentinel.name_match_level,
'read_deleted': mock.sentinel.read_deleted,
'get_services': mock.sentinel.get_services,
'services_summary': mock.sentinel.services_summary,
'name': mock.sentinel.name,
'binary': mock.sentinel.binary,
'is_up': mock.sentinel.is_up,
'disabled': mock.sentinel.disabled,
'disabled_reason': mock.sentinel.disabled_reason,
'race_preventer': mock.sentinel.race_preventer,
'last_heartbeat': mock.sentinel.last_heartbeat,
'num_hosts': mock.sentinel.num_hosts,
'num_down_hosts': mock.sentinel.num_down_hosts}
class TestCluster(test_objects.BaseObjectsTestCase):
    """Test Cluster Versioned Object methods."""
    # ORM row shared by the tests; evaluated at class-definition time so it
    # can be referenced by the mock.patch decorators below.
    cluster = fake_cluster.fake_cluster_orm()

    @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
    def test_get_by_id(self, cluster_get_mock):
        """get_by_id forwards all filters untouched to the DB layer."""
        filters = _get_filters_sentinel()
        cluster = objects.Cluster.get_by_id(self.context,
                                            mock.sentinel.cluster_id,
                                            **filters)
        self.assertIsInstance(cluster, objects.Cluster)
        self._compare(self, self.cluster, cluster)
        cluster_get_mock.assert_called_once_with(self.context,
                                                 mock.sentinel.cluster_id,
                                                 **filters)

    @mock.patch('cinder.db.sqlalchemy.api.cluster_create',
                return_value=cluster)
    def test_create(self, cluster_create_mock):
        """create sends only the changed fields to cluster_create."""
        cluster = objects.Cluster(context=self.context, name='cluster_name')
        cluster.create()
        self.assertEqual(self.cluster.id, cluster.id)
        cluster_create_mock.assert_called_once_with(self.context,
                                                    {'name': 'cluster_name'})

    @mock.patch('cinder.db.sqlalchemy.api.cluster_update',
                return_value=cluster)
    def test_save(self, cluster_update_mock):
        """save sends only the changed fields to cluster_update."""
        cluster = fake_cluster.fake_cluster_ovo(self.context)
        cluster.disabled = True
        cluster.save()
        cluster_update_mock.assert_called_once_with(self.context, cluster.id,
                                                    {'disabled': True})

    @mock.patch('cinder.db.sqlalchemy.api.cluster_destroy')
    def test_destroy(self, cluster_destroy_mock):
        """destroy calls cluster_destroy; context is elevated, so mock.ANY."""
        cluster = fake_cluster.fake_cluster_ovo(self.context)
        cluster.destroy()
        cluster_destroy_mock.assert_called_once_with(mock.ANY, cluster.id)

    @mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
    def test_refresh(self, cluster_get_mock):
        """refresh re-reads the row from the DB by this cluster's id."""
        cluster = fake_cluster.fake_cluster_ovo(self.context)
        cluster.refresh()
        cluster_get_mock.assert_called_once_with(self.context, cluster.id)

    def test_is_up_no_last_hearbeat(self):
        """A cluster that never sent a heartbeat is reported as down."""
        cluster = fake_cluster.fake_cluster_ovo(self.context,
                                                last_heartbeat=None)
        self.assertFalse(cluster.is_up())

    def test_is_up(self):
        """A cluster with a current heartbeat is reported as up."""
        cluster = fake_cluster.fake_cluster_ovo(
            self.context,
            last_heartbeat=timeutils.utcnow(with_timezone=True))
        self.assertTrue(cluster.is_up())

    def test_is_up_limit(self):
        """A heartbeat just newer than the expiry cutoff still counts as up."""
        limit_expired = (utils.service_expired_time(True) +
                         timeutils.datetime.timedelta(seconds=1))
        cluster = fake_cluster.fake_cluster_ovo(self.context,
                                                last_heartbeat=limit_expired)
        self.assertTrue(cluster.is_up())

    def test_is_up_down(self):
        """A heartbeat older than the expiry cutoff means the cluster is down."""
        expired_time = (utils.service_expired_time(True) -
                        timeutils.datetime.timedelta(seconds=1))
        cluster = fake_cluster.fake_cluster_ovo(self.context,
                                                last_heartbeat=expired_time)
        self.assertFalse(cluster.is_up())
class TestClusterList(test_objects.BaseObjectsTestCase):
    """Test ClusterList Versioned Object methods."""

    @mock.patch('cinder.db.sqlalchemy.api.cluster_get_all')
    def test_cluster_get_all(self, get_all_mock):
        """get_all forwards filters to the DB and wraps each row in an OVO."""
        db_rows = [fake_cluster.fake_cluster_orm(),
                   fake_cluster.fake_cluster_orm(id=2, name='cluster_name2')]
        get_all_mock.return_value = db_rows
        filters = _get_filters_sentinel()

        result = objects.ClusterList.get_all(self.context, **filters)

        # The known arguments are passed positionally; whatever remains in
        # the filters dict travels as keyword arguments.
        positional = (filters.pop('is_up'),
                      filters.pop('get_services'),
                      filters.pop('services_summary'),
                      filters.pop('read_deleted'),
                      filters.pop('name_match_level'))
        get_all_mock.assert_called_once_with(self.context, *positional,
                                             **filters)
        self.assertEqual(2, len(result))
        for db_row, ovo in zip(db_rows, result):
            self.assertIsInstance(ovo, objects.Cluster)
            self._compare(self, db_row, ovo)

View File

@ -257,3 +257,23 @@ class TestConsistencyGroupList(test_objects.BaseObjectsTestCase):
limit=1, offset=None, sort_keys='id', sort_dirs='asc')
TestConsistencyGroup._compare(self, fake_consistencygroup,
consistencygroups[0])
@mock.patch('cinder.db.consistencygroup_include_in_cluster')
def test_include_in_cluster(self, include_mock):
filters = {'host': mock.sentinel.host,
'cluster_name': mock.sentinel.cluster_name}
cluster = 'new_cluster'
objects.ConsistencyGroupList.include_in_cluster(self.context, cluster,
**filters)
include_mock.assert_called_once_with(self.context, cluster, True,
**filters)
@mock.patch('cinder.db.consistencygroup_include_in_cluster')
def test_include_in_cluster_specify_partial(self, include_mock):
filters = {'host': mock.sentinel.host,
'cluster_name': mock.sentinel.cluster_name}
cluster = 'new_cluster'
objects.ConsistencyGroupList.include_in_cluster(
self.context, cluster, mock.sentinel.partial_rename, **filters)
include_mock.assert_called_once_with(
self.context, cluster, mock.sentinel.partial_rename, **filters)

View File

@ -26,17 +26,19 @@ object_data = {
'Backup': '1.4-c50f7a68bb4c400dd53dd219685b3992',
'BackupImport': '1.4-c50f7a68bb4c400dd53dd219685b3992',
'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Cluster': '1.0-6f06e867c073e9d31722c53b0a9329b8',
'ClusterList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'CGSnapshot': '1.0-3212ac2b4c2811b7134fb9ba2c49ff74',
'CGSnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'ConsistencyGroup': '1.2-ff7638e03ae7a3bb7a43a6c5c4d0c94a',
'ConsistencyGroup': '1.3-7bf01a79b82516639fc03cd3ab6d9c01',
'ConsistencyGroupList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'QualityOfServiceSpecs': '1.0-0b212e0a86ee99092229874e03207fe8',
'QualityOfServiceSpecsList': '1.0-1b54e51ad0fc1f3a8878f5010e7e16dc',
'Service': '1.3-d7c1e133791c9d766596a0528fc9a12f',
'Service': '1.4-c7d011989d1718ca0496ccf640b42712',
'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Snapshot': '1.1-37966f7141646eb29e9ad5298ff2ca8a',
'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Volume': '1.3-15ff1f42d4e8eb321aa8217dd46aa1e1',
'Volume': '1.4-cd0fc67e0ea8c9a28d9dce6b21368e01',
'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'VolumeAttachment': '1.0-b30dacf62b2030dd83d8a1603f1064ff',
'VolumeAttachmentList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',

View File

@ -453,3 +453,24 @@ class TestVolumeList(test_objects.BaseObjectsTestCase):
mock.sentinel.sorted_dirs, mock.sentinel.filters)
self.assertEqual(1, len(volumes))
TestVolume._compare(self, db_volume, volumes[0])
@mock.patch('cinder.db.volume_include_in_cluster')
def test_include_in_cluster(self, include_mock):
filters = {'host': mock.sentinel.host,
'cluster_name': mock.sentinel.cluster_name}
cluster = 'new_cluster'
objects.VolumeList.include_in_cluster(self.context, cluster, **filters)
include_mock.assert_called_once_with(self.context, cluster, True,
**filters)
@mock.patch('cinder.db.volume_include_in_cluster')
def test_include_in_cluster_specify_partial(self, include_mock):
filters = {'host': mock.sentinel.host,
'cluster_name': mock.sentinel.cluster_name}
cluster = 'new_cluster'
objects.VolumeList.include_in_cluster(self.context, cluster,
mock.sentinel.partial_rename,
**filters)
include_mock.assert_called_once_with(self.context, cluster,
mock.sentinel.partial_rename,
**filters)

View File

@ -1027,6 +1027,6 @@ def validate_dictionary_string_length(specs):
min_length=0, max_length=255)
def service_expired_time():
return (timeutils.utcnow() -
def service_expired_time(with_timezone=False):
    """Return the oldest heartbeat timestamp a service may have and be up.

    Anything with a heartbeat older than now minus CONF.service_down_time is
    considered down.

    :param with_timezone: if True the returned datetime is timezone-aware
                          (UTC); otherwise it is naive.
    """
    return (timeutils.utcnow(with_timezone=with_timezone) -
            datetime.timedelta(seconds=CONF.service_down_time))