Allocate uuids for aggregates as they are created or loaded
Note that this filters uuid from the aggregate view in the API because
we would need a microversion. That may be a thing in the future (for
the 2.x API) but is not something we can do here.

Related to blueprint generic-resource-pools

Change-Id: I45006e546867d348563831986b91a317029a1173
parent 49fff67698
commit 9a0ed11827
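
The core of the change is a backfill-on-load pattern: when an aggregate is pulled from the database without a uuid, one is generated via oslo.utils and saved back immediately, so existing records acquire uuids lazily as they are touched. Below is a minimal, self-contained sketch of that pattern only; the class and function names here are illustrative and do not appear in the diff that follows.

# Minimal sketch of the backfill-on-load pattern used below in
# Aggregate._from_db_object(); names are illustrative only.
from oslo_utils import uuidutils


class Record(object):
    """Stand-in for an object hydrated from a database row."""

    def __init__(self, row):
        self.id = row['id']
        self.uuid = row.get('uuid')  # may be None for pre-existing rows

    def save(self):
        # In the real code this writes only the changed fields back.
        print('saving uuid %s for record %s' % (self.uuid, self.id))


def load(row):
    record = Record(row)
    # Backfill: older rows have no uuid yet, so allocate one on first
    # load and persist it immediately, exactly once.
    if not record.uuid:
        record.uuid = uuidutils.generate_uuid()
        record.save()
    return record


# Example: a legacy row without a uuid gets one assigned on load.
legacy = load({'id': 123, 'uuid': None})
assert uuidutils.is_uuid_like(legacy.uuid)
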
@@ -209,8 +209,11 @@ class AggregateController(wsgi.Controller):
     def _build_aggregate_items(self, aggregate):
         keys = aggregate.obj_fields
         for key in keys:
-            if (aggregate.obj_attr_is_set(key)
-                    or key in aggregate.obj_extra_fields):
+            # NOTE(danms): Skip the uuid field because we have no microversion
+            # to expose it
+            if ((aggregate.obj_attr_is_set(key)
+                    or key in aggregate.obj_extra_fields) and
+                    key != 'uuid'):
                 yield key, getattr(aggregate, key)
 
 
@@ -278,8 +278,11 @@ class AggregateController(object):
     def _build_aggregate_items(self, aggregate):
         keys = aggregate.obj_fields
         for key in keys:
-            if (aggregate.obj_attr_is_set(key)
-                    or key in aggregate.obj_extra_fields):
+            # NOTE(danms): Skip the uuid field because we have no microversion
+            # to expose it
+            if ((aggregate.obj_attr_is_set(key)
+                    or key in aggregate.obj_extra_fields) and
+                    key != 'uuid'):
                 yield key, getattr(aggregate, key)
 
 
@@ -12,6 +12,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
 from nova.compute import utils as compute_utils
 from nova import db
 from nova import exception
@@ -19,15 +22,19 @@ from nova import objects
 from nova.objects import base
 from nova.objects import fields
 
+LOG = logging.getLogger(__name__)
+
 
 @base.NovaObjectRegistry.register
 class Aggregate(base.NovaPersistentObject, base.NovaObject):
     # Version 1.0: Initial version
     # Version 1.1: String attributes updated to support unicode
-    VERSION = '1.1'
+    # Version 1.2: Added uuid field
+    VERSION = '1.2'
 
     fields = {
         'id': fields.IntegerField(),
+        'uuid': fields.UUIDField(nullable=False),
         'name': fields.StringField(),
         'hosts': fields.ListOfStringsField(nullable=True),
         'metadata': fields.DictOfStringsField(nullable=True),
@@ -40,11 +47,31 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
         for key in aggregate.fields:
             if key == 'metadata':
                 db_key = 'metadetails'
+            elif key == 'uuid':
+                continue
             else:
                 db_key = key
             setattr(aggregate, key, db_aggregate[db_key])
+
+        # NOTE(danms): Remove this conditional load (and remove uuid
+        # special cases above) once we're in Newton and have enforced
+        # that all UUIDs in the database are not NULL.
+        if db_aggregate.get('uuid'):
+            aggregate.uuid = db_aggregate['uuid']
+
         aggregate._context = context
         aggregate.obj_reset_changes()
+
+        # NOTE(danms): This needs to come after obj_reset_changes() to make
+        # sure we only save the uuid, if we generate one.
+        # FIXME(danms): Remove this in Newton once we have enforced that
+        # all aggregates have uuids set in the database.
+        if 'uuid' not in aggregate:
+            aggregate.uuid = uuidutils.generate_uuid()
+            LOG.debug('Generating UUID %(uuid)s for aggregate %(agg)i',
+                      dict(uuid=aggregate.uuid, agg=aggregate.id))
+            aggregate.save()
+
         return aggregate
 
     def _assert_no_hosts(self, action):
@@ -69,6 +96,10 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
         if 'metadata' in updates:
             # NOTE(danms): For some reason the notification format is weird
             payload['meta_data'] = payload.pop('metadata')
+        if 'uuid' not in updates:
+            updates['uuid'] = uuidutils.generate_uuid()
+            LOG.debug('Generated uuid %(uuid)s for aggregate',
+                      dict(uuid=updates['uuid']))
         compute_utils.notify_about_aggregate_update(self._context,
                                                     "create.start",
                                                     payload)
@@ -28,6 +28,7 @@ from nova import objects
 from nova import test
 from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit import matchers
+from nova.tests import uuidsentinel
 
 
 def _make_agg_obj(agg_dict):
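
The test changes above and below lean on nova.tests.uuidsentinel, a small helper that hands out one stable UUID per attribute name (analogous to mock.sentinel, but with real UUID values). The following is a rough, illustrative re-implementation of that idea, not Nova's actual module:

# Illustrative re-implementation of a uuidsentinel-style helper: each
# attribute name maps to one stable, randomly generated UUID for the
# life of the process, so tests can compare values by name.
import uuid


class UUIDSentinels(object):
    def __init__(self):
        self._uuids = {}

    def __getattr__(self, name):
        if name.startswith('_'):
            raise AttributeError(name)
        # Generate once per name, then return the same value forever.
        if name not in self._uuids:
            self._uuids[name] = str(uuid.uuid4())
        return self._uuids[name]


uuidsentinel = UUIDSentinels()

# uuidsentinel.fake_aggregate is a valid UUID string and is identical
# everywhere it is referenced within the run.
assert uuidsentinel.fake_aggregate == uuidsentinel.fake_aggregate
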
@@ -699,7 +700,7 @@ class AggregateTestCaseV21(test.NoDBTestCase):
         # We would expect the dictionary that comes out is the same one
         # that we pump into the aggregate object in the first place
         agg = {'name': 'aggregate1',
-               'id': 1,
+               'id': 1, 'uuid': uuidsentinel.aggregate,
                'metadata': {'foo': 'bar', 'availability_zone': 'nova'},
                'hosts': ['host1', 'host2']}
         agg_obj = _make_agg_obj(agg)
@@ -708,6 +709,7 @@ class AggregateTestCaseV21(test.NoDBTestCase):
         # _marshall_aggregate() puts all fields and obj_extra_fields in the
         # top-level dict, so we need to put availability_zone at the top also
         agg['availability_zone'] = 'nova'
+        del agg['uuid']
         self.assertEqual(agg, marshalled_agg['aggregate'])
 
     def _assert_agg_data(self, expected, actual):
@@ -20,6 +20,7 @@ from nova import exception
 from nova.objects import aggregate
 from nova.tests.unit import fake_notifier
 from nova.tests.unit.objects import test_objects
+from nova.tests import uuidsentinel
 
 
 NOW = timeutils.utcnow().replace(microsecond=0)
@@ -29,6 +30,7 @@ fake_aggregate = {
     'deleted_at': None,
     'deleted': False,
     'id': 123,
+    'uuid': uuidsentinel.fake_aggregate,
     'name': 'fake-aggregate',
     'hosts': ['foo', 'bar'],
     'metadetails': {'this': 'that'},
@@ -45,25 +47,43 @@ class _TestAggregateObject(object):
         agg = aggregate.Aggregate.get_by_id(self.context, 123)
         self.compare_obj(agg, fake_aggregate, subs=SUBS)
 
+    @mock.patch('nova.objects.Aggregate.save')
+    @mock.patch('nova.db.aggregate_get')
+    def test_load_allocates_uuid(self, mock_get, mock_save):
+        fake_agg = dict(fake_aggregate)
+        del fake_agg['uuid']
+        mock_get.return_value = fake_agg
+        uuid = uuidsentinel.aggregate
+        with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_g:
+            mock_g.return_value = uuid
+            obj = aggregate.Aggregate.get_by_id(self.context, 123)
+            mock_g.assert_called_once_with()
+            self.assertEqual(uuid, obj.uuid)
+            mock_save.assert_called_once_with()
+
     def test_create(self):
         self.mox.StubOutWithMock(db, 'aggregate_create')
-        db.aggregate_create(self.context, {'name': 'foo'},
+        db.aggregate_create(self.context, {'name': 'foo',
+                                           'uuid': uuidsentinel.fake_agg},
                             metadata={'one': 'two'}).AndReturn(fake_aggregate)
         self.mox.ReplayAll()
         agg = aggregate.Aggregate(context=self.context)
         agg.name = 'foo'
         agg.metadata = {'one': 'two'}
+        agg.uuid = uuidsentinel.fake_agg
         agg.create()
         self.compare_obj(agg, fake_aggregate, subs=SUBS)
 
     def test_recreate_fails(self):
         self.mox.StubOutWithMock(db, 'aggregate_create')
-        db.aggregate_create(self.context, {'name': 'foo'},
+        db.aggregate_create(self.context, {'name': 'foo',
+                                           'uuid': uuidsentinel.fake_agg},
                             metadata={'one': 'two'}).AndReturn(fake_aggregate)
         self.mox.ReplayAll()
         agg = aggregate.Aggregate(context=self.context)
         agg.name = 'foo'
         agg.metadata = {'one': 'two'}
+        agg.uuid = uuidsentinel.fake_agg
         agg.create()
         self.assertRaises(exception.ObjectActionError, agg.create)
 
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - Upon first startup of the scheduler service in Mitaka, all defined
+    aggregates will have UUIDs generated and saved back to the
+    database. If you have a significant number of aggregates, this may
+    delay scheduler start as that work is completed, but it should be
+    minor for most deployments.
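
As the release note describes, existing aggregates pick up UUIDs the first time they are loaded after the upgrade. Below is a hedged sketch of one way to confirm the backfill by listing aggregates through the versioned objects layer; it assumes a configured Nova environment of roughly this release, and that config.parse_args(), objects.register_all(), context.get_admin_context() and AggregateList.get_all() behave as they did at the time.

# Sketch only: verify that aggregates have uuids by loading them
# through the objects layer, which triggers the backfill shown above.
from nova import config
from nova import context
from nova import objects


def main():
    config.parse_args([])      # load nova.conf / database connection
    objects.register_all()     # register versioned object classes
    ctxt = context.get_admin_context()
    for agg in objects.AggregateList.get_all(ctxt):
        # Loading each aggregate allocates and saves a uuid if missing.
        print('%s: %s' % (agg.name, agg.uuid))


if __name__ == '__main__':
    main()
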