# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
2016-02-19 11:50:44 -08:00
|
|
|
from oslo_log import log as logging
|
|
|
|
from oslo_utils import uuidutils
|
|
|
|
|
2016-03-22 14:46:56 -05:00
|
|
|
from sqlalchemy.orm import contains_eager
|
2016-03-22 10:13:15 -05:00
|
|
|
from sqlalchemy.orm import joinedload
|
|
|
|
|
2013-08-19 15:21:53 -07:00
|
|
|
from nova.compute import utils as compute_utils
|
|
|
|
from nova import db
|
2016-03-22 10:13:15 -05:00
|
|
|
from nova.db.sqlalchemy import api as db_api
|
|
|
|
from nova.db.sqlalchemy import api_models
|
2013-08-19 15:21:53 -07:00
|
|
|
from nova import exception
|
2014-05-17 16:06:33 -07:00
|
|
|
from nova import objects
|
2013-08-19 15:21:53 -07:00
|
|
|
from nova.objects import base
|
2013-10-03 09:32:43 -07:00
|
|
|
from nova.objects import fields
|
2013-08-19 15:21:53 -07:00
|
|
|
|
2016-02-19 11:50:44 -08:00
|
|
|
LOG = logging.getLogger(__name__)
|
|
|
|
|
2016-03-22 10:13:15 -05:00
|
|
|
DEPRECATED_FIELDS = ['deleted', 'deleted_at']
|
|
|
|
|
|
|
|
|
|
|
|
@db_api.api_context_manager.reader
def _aggregate_get_from_db(context, aggregate_id):
    """Return one API-DB aggregate with hosts and metadata eagerly loaded.

    :raises: AggregateNotFound if no aggregate matches ``aggregate_id``.
    """
    query = (context.session.query(api_models.Aggregate)
             .options(joinedload('_hosts'))
             .options(joinedload('_metadata'))
             .filter(api_models.Aggregate.id == aggregate_id))

    aggregate = query.first()
    if aggregate is None:
        raise exception.AggregateNotFound(aggregate_id=aggregate_id)
    return aggregate
|
|
|
|
|
2013-08-19 15:21:53 -07:00
|
|
|
|
2016-06-12 14:29:23 -05:00
|
|
|
@db_api.api_context_manager.reader
def _aggregate_get_from_db_by_uuid(context, aggregate_uuid):
    """Return one API-DB aggregate looked up by its uuid.

    :raises: AggregateNotFound if no aggregate matches ``aggregate_uuid``.
    """
    query = (context.session.query(api_models.Aggregate)
             .options(joinedload('_hosts'))
             .options(joinedload('_metadata'))
             .filter(api_models.Aggregate.uuid == aggregate_uuid))

    aggregate = query.first()
    if aggregate is None:
        # NOTE(review): the exception keyword is aggregate_id even though
        # the lookup value is a uuid; callers only see the message.
        raise exception.AggregateNotFound(aggregate_id=aggregate_uuid)
    return aggregate
|
|
|
|
|
|
|
|
|
2015-06-02 09:54:45 -07:00
|
|
|
@base.NovaObjectRegistry.register
class Aggregate(base.NovaPersistentObject, base.NovaObject):
    """A host aggregate: a named group of hosts with string metadata.

    Reads prefer the API database and fall back to the legacy cell
    database for compatibility with pre-migration deployments.
    """
    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Added uuid field
    # Version 1.3: Added get_by_uuid method
    VERSION = '1.3'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(),
        'hosts': fields.ListOfStringsField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        }

    obj_extra_fields = ['availability_zone']

    @staticmethod
    def _from_db_object(context, aggregate, db_aggregate):
        """Populate ``aggregate`` from a database row and return it."""
        for field in aggregate.fields:
            if field == 'uuid':
                # Loaded conditionally below; see NOTE(danms).
                continue
            if field in DEPRECATED_FIELDS and field not in db_aggregate:
                # API-DB rows do not carry the soft-delete columns.
                continue
            db_key = 'metadetails' if field == 'metadata' else field
            setattr(aggregate, field, db_aggregate[db_key])

        # NOTE(danms): Remove this conditional load (and remove uuid
        # special cases above) once we're in Newton and have enforced
        # that all UUIDs in the database are not NULL.
        if db_aggregate.get('uuid'):
            aggregate.uuid = db_aggregate['uuid']

        # NOTE: This can be removed when we remove compatibility with
        # the old aggregate model.
        if any(f not in db_aggregate for f in DEPRECATED_FIELDS):
            aggregate.deleted_at = None
            aggregate.deleted = False

        aggregate._context = context
        aggregate.obj_reset_changes()

        # NOTE(danms): This needs to come after obj_reset_changes() to make
        # sure we only save the uuid, if we generate one.
        # FIXME(danms): Remove this in Newton once we have enforced that
        # all aggregates have uuids set in the database.
        if 'uuid' not in aggregate:
            aggregate.uuid = uuidutils.generate_uuid()
            LOG.debug('Generating UUID %(uuid)s for aggregate %(agg)i',
                      dict(uuid=aggregate.uuid, agg=aggregate.id))
            aggregate.save()

        return aggregate

    def _assert_no_hosts(self, action):
        """Refuse inline 'hosts' edits; use add_host()/delete_host()."""
        if 'hosts' in self.obj_what_changed():
            raise exception.ObjectActionError(action=action,
                                              reason='hosts updated inline')

    @base.remotable_classmethod
    def get_by_id(cls, context, aggregate_id):
        """Fetch an aggregate by integer id, preferring the API DB."""
        try:
            db_aggregate = _aggregate_get_from_db(context, aggregate_id)
        except exception.AggregateNotFound:
            # Not yet migrated; try the legacy cell database.
            db_aggregate = db.aggregate_get(context, aggregate_id)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, aggregate_uuid):
        """Fetch an aggregate by uuid, preferring the API DB."""
        try:
            db_aggregate = _aggregate_get_from_db_by_uuid(context,
                                                          aggregate_uuid)
        except exception.AggregateNotFound:
            db_aggregate = db.aggregate_get_by_uuid(context, aggregate_uuid)
        return cls._from_db_object(context, cls(), db_aggregate)

    @base.remotable
    def create(self):
        """Insert this aggregate, emitting create.start/end notifications.

        :raises: ObjectActionError if already created or if hosts were
            set inline.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        self._assert_no_hosts('create')

        updates = self.obj_get_changes()
        payload = dict(updates)
        # NOTE(danms): For some reason the notification format is weird
        if 'metadata' in updates:
            payload['meta_data'] = payload.pop('metadata')
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            LOG.debug('Generated uuid %(uuid)s for aggregate',
                      dict(uuid=updates['uuid']))

        notify = compute_utils.notify_about_aggregate_update
        notify(self._context, "create.start", payload)
        metadata = updates.pop('metadata', None)
        db_aggregate = db.aggregate_create(self._context, updates,
                                           metadata=metadata)
        self._from_db_object(self._context, self, db_aggregate)
        payload['aggregate_id'] = self.id
        notify(self._context, "create.end", payload)

    @base.remotable
    def save(self):
        """Persist changed fields, emitting updateprop notifications."""
        self._assert_no_hosts('save')
        updates = self.obj_get_changes()

        payload = {'aggregate_id': self.id}
        if 'metadata' in updates:
            payload['meta_data'] = updates['metadata']
        notify = compute_utils.notify_about_aggregate_update
        notify(self._context, "updateprop.start", payload)
        updates.pop('id', None)
        db_aggregate = db.aggregate_update(self._context, self.id, updates)
        notify(self._context, "updateprop.end", payload)
        self._from_db_object(self._context, self, db_aggregate)

    @base.remotable
    def update_metadata(self, updates):
        """Apply metadata changes; a value of None deletes that key."""
        payload = {'aggregate_id': self.id,
                   'meta_data': updates}
        notify = compute_utils.notify_about_aggregate_update
        notify(self._context, "updatemetadata.start", payload)
        to_add = {}
        for key, value in updates.items():
            if value is None:
                # Deletion is best-effort in both the DB and the local copy.
                try:
                    db.aggregate_metadata_delete(self._context, self.id, key)
                except exception.AggregateMetadataNotFound:
                    pass
                self.metadata.pop(key, None)
            else:
                to_add[key] = value
                self.metadata[key] = value
        db.aggregate_metadata_add(self._context, self.id, to_add)
        notify(self._context, "updatemetadata.end", payload)
        self.obj_reset_changes(fields=['metadata'])

    @base.remotable
    def destroy(self):
        """Delete this aggregate from the database."""
        db.aggregate_delete(self._context, self.id)

    @base.remotable
    def add_host(self, host):
        """Associate ``host`` with this aggregate."""
        db.aggregate_host_add(self._context, self.id, host)
        if self.hosts is None:
            self.hosts = []
        self.hosts.append(host)
        self.obj_reset_changes(fields=['hosts'])

    @base.remotable
    def delete_host(self, host):
        """Disassociate ``host`` from this aggregate."""
        db.aggregate_host_delete(self._context, self.id, host)
        self.hosts.remove(host)
        self.obj_reset_changes(fields=['hosts'])

    @property
    def availability_zone(self):
        """The AZ name stored in metadata, or None if not set."""
        return self.metadata.get('availability_zone', None)
|
|
|
|
|
|
|
|
|
2016-03-22 14:46:56 -05:00
|
|
|
@db_api.api_context_manager.reader
def _get_all_from_db(context):
    """Return every API-DB aggregate, with hosts and metadata loaded."""
    return (context.session.query(api_models.Aggregate)
            .options(joinedload('_hosts'))
            .options(joinedload('_metadata'))
            .all())
|
|
|
|
|
|
|
|
|
|
|
|
@db_api.api_context_manager.reader
def _get_by_host_from_db(context, host, key=None):
    """Return API-DB aggregates that contain ``host``.

    When ``key`` is truthy, restrict to aggregates that also have that
    metadata key.
    """
    query = (context.session.query(api_models.Aggregate)
             .options(joinedload('_hosts'))
             .options(joinedload('_metadata'))
             .join('_hosts')
             .filter(api_models.AggregateHost.host == host))

    if key:
        query = query.join("_metadata").filter(
            api_models.AggregateMetadata.key == key)

    return query.all()
|
|
|
|
|
|
|
|
|
|
|
|
@db_api.api_context_manager.reader
def _get_by_metadata_key_from_db(context, key):
    """Return API-DB aggregates having metadata key ``key``.

    contains_eager reuses the joined metadata rows, so only the matching
    metadata entries are populated on each result.
    """
    return (context.session.query(api_models.Aggregate)
            .join("_metadata")
            .filter(api_models.AggregateMetadata.key == key)
            .options(contains_eager("_metadata"))
            .options(joinedload("_hosts"))
            .all())
|
|
|
|
|
|
|
|
|
2015-06-02 09:54:45 -07:00
|
|
|
@base.NovaObjectRegistry.register
class AggregateList(base.ObjectListBase, base.NovaObject):
    """A list of Aggregate objects, merged from the API and legacy DBs."""
    # Version 1.0: Initial version
    # Version 1.1: Added key argument to get_by_host()
    #              Aggregate <= version 1.1
    # Version 1.2: Added get_by_metadata_key
    VERSION = '1.2'

    fields = {
        'objects': fields.ListOfObjectsField('Aggregate'),
        }

    # NOTE(mdoff): Calls to this can be removed when we remove
    # compatibility with the old aggregate model.
    @staticmethod
    def _fill_deprecated(db_aggregate):
        """Patch an API-DB row with the soft-delete columns that
        Aggregate._from_db_object still expects.
        """
        db_aggregate['deleted_at'] = None
        db_aggregate['deleted'] = False
        return db_aggregate

    @classmethod
    def _filter_db_aggregates(cls, db_aggregates, hosts):
        """Keep only aggregates containing at least one host in ``hosts``."""
        wanted = hosts if isinstance(hosts, set) else set(hosts)
        return [aggregate for aggregate in db_aggregates
                if any(host in wanted for host in aggregate['hosts'])]

    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every aggregate from both databases."""
        api_db_aggregates = [cls._fill_deprecated(agg)
                             for agg in _get_all_from_db(context)]
        db_aggregates = db.aggregate_get_all(context)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates + api_db_aggregates)

    @base.remotable_classmethod
    def get_by_host(cls, context, host, key=None):
        """Return aggregates containing ``host``, optionally restricted
        to those carrying metadata key ``key``.
        """
        api_db_aggregates = [cls._fill_deprecated(agg) for agg in
                             _get_by_host_from_db(context, host, key=key)]
        db_aggregates = db.aggregate_get_by_host(context, host, key=key)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  db_aggregates + api_db_aggregates)

    @base.remotable_classmethod
    def get_by_metadata_key(cls, context, key, hosts=None):
        """Return aggregates with metadata key ``key``; when ``hosts`` is
        given, only those containing at least one of them.
        """
        api_db_aggregates = [cls._fill_deprecated(agg) for agg in
                             _get_by_metadata_key_from_db(context, key=key)]
        db_aggregates = db.aggregate_get_by_metadata_key(context, key=key)

        all_aggregates = db_aggregates + api_db_aggregates
        if hosts is not None:
            all_aggregates = cls._filter_db_aggregates(all_aggregates, hosts)
        return base.obj_make_list(context, cls(context), objects.Aggregate,
                                  all_aggregates)
|