Allocate uuids for aggregates as they are created or loaded

Note that this filters the uuid field from the aggregate view in the
API, because exposing it would require a microversion. That may come
later (for the 2.x API), but it is not something we can do here.

Related to blueprint generic-resource-pools

Change-Id: I45006e546867d348563831986b91a317029a1173
commit 67324e61e0 (parent fd8185d228)
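Before the diff, a minimal standalone sketch of the two behaviors this change combines. This is plain Python with illustrative names (Aggregate, marshal), not Nova's actual classes: generate and persist a uuid lazily when a loaded record lacks one, and filter uuid out when marshalling for the API, since no microversion exposes it yet.

import uuid


class Aggregate(object):
    def __init__(self, record):
        self.id = record['id']
        self.name = record['name']
        # Older rows may predate the uuid column; allocate one on load
        # and persist it, mirroring _from_db_object() in the diff below.
        if not record.get('uuid'):
            record['uuid'] = str(uuid.uuid4())
            self.save(record)
        self.uuid = record['uuid']

    def save(self, record):
        # Stand-in for writing the generated uuid back to the database.
        print('persisting uuid %s for aggregate %d'
              % (record['uuid'], record['id']))


def marshal(agg):
    # Skip the uuid field: exposing it would need an API microversion.
    return {k: v for k, v in vars(agg).items() if k != 'uuid'}


if __name__ == '__main__':
    agg = Aggregate({'id': 1, 'name': 'agg1', 'uuid': None})
    print(marshal(agg))  # uuid is filtered from the API view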
@@ -209,8 +209,11 @@ class AggregateController(wsgi.Controller):
     def _build_aggregate_items(self, aggregate):
         keys = aggregate.obj_fields
         for key in keys:
-            if (aggregate.obj_attr_is_set(key)
-                    or key in aggregate.obj_extra_fields):
+            # NOTE(danms): Skip the uuid field because we have no microversion
+            # to expose it
+            if ((aggregate.obj_attr_is_set(key)
+                    or key in aggregate.obj_extra_fields) and
+                    key != 'uuid'):
                 yield key, getattr(aggregate, key)
 
 
@@ -278,8 +278,11 @@ class AggregateController(object):
     def _build_aggregate_items(self, aggregate):
         keys = aggregate.obj_fields
         for key in keys:
-            if (aggregate.obj_attr_is_set(key)
-                    or key in aggregate.obj_extra_fields):
+            # NOTE(danms): Skip the uuid field because we have no microversion
+            # to expose it
+            if ((aggregate.obj_attr_is_set(key)
+                    or key in aggregate.obj_extra_fields) and
+                    key != 'uuid'):
                 yield key, getattr(aggregate, key)
 
 
@@ -12,6 +12,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
 from nova.compute import utils as compute_utils
 from nova import db
 from nova import exception
@@ -19,15 +22,19 @@ from nova import objects
 from nova.objects import base
 from nova.objects import fields
 
+LOG = logging.getLogger(__name__)
+
 
 @base.NovaObjectRegistry.register
 class Aggregate(base.NovaPersistentObject, base.NovaObject):
     # Version 1.0: Initial version
     # Version 1.1: String attributes updated to support unicode
-    VERSION = '1.1'
+    # Version 1.2: Added uuid field
+    VERSION = '1.2'
 
     fields = {
         'id': fields.IntegerField(),
+        'uuid': fields.UUIDField(nullable=False),
         'name': fields.StringField(),
         'hosts': fields.ListOfStringsField(nullable=True),
         'metadata': fields.DictOfStringsField(nullable=True),
@@ -40,11 +47,31 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
         for key in aggregate.fields:
             if key == 'metadata':
                 db_key = 'metadetails'
+            elif key == 'uuid':
+                continue
             else:
                 db_key = key
             setattr(aggregate, key, db_aggregate[db_key])
+
+        # NOTE(danms): Remove this conditional load (and remove uuid
+        # special cases above) once we're in Newton and have enforced
+        # that all UUIDs in the database are not NULL.
+        if db_aggregate.get('uuid'):
+            aggregate.uuid = db_aggregate['uuid']
+
         aggregate._context = context
         aggregate.obj_reset_changes()
+
+        # NOTE(danms): This needs to come after obj_reset_changes() to make
+        # sure we only save the uuid, if we generate one.
+        # FIXME(danms): Remove this in Newton once we have enforced that
+        # all aggregates have uuids set in the database.
+        if 'uuid' not in aggregate:
+            aggregate.uuid = uuidutils.generate_uuid()
+            LOG.debug('Generating UUID %(uuid)s for aggregate %(agg)i',
+                      dict(uuid=aggregate.uuid, agg=aggregate.id))
+            aggregate.save()
+
         return aggregate
 
     def _assert_no_hosts(self, action):
@@ -69,6 +96,10 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
         if 'metadata' in updates:
             # NOTE(danms): For some reason the notification format is weird
             payload['meta_data'] = payload.pop('metadata')
+        if 'uuid' not in updates:
+            updates['uuid'] = uuidutils.generate_uuid()
+            LOG.debug('Generated uuid %(uuid)s for aggregate',
+                      dict(uuid=updates['uuid']))
         compute_utils.notify_about_aggregate_update(self._context,
                                                     "create.start",
                                                     payload)
@@ -28,6 +28,7 @@ from nova import objects
 from nova import test
 from nova.tests.unit.api.openstack import fakes
 from nova.tests.unit import matchers
+from nova.tests import uuidsentinel
 
 
 def _make_agg_obj(agg_dict):
@@ -699,7 +700,7 @@ class AggregateTestCaseV21(test.NoDBTestCase):
         # We would expect the dictionary that comes out is the same one
         # that we pump into the aggregate object in the first place
         agg = {'name': 'aggregate1',
-               'id': 1,
+               'id': 1, 'uuid': uuidsentinel.aggregate,
                'metadata': {'foo': 'bar', 'availability_zone': 'nova'},
                'hosts': ['host1', 'host2']}
         agg_obj = _make_agg_obj(agg)
@@ -708,6 +709,7 @@ class AggregateTestCaseV21(test.NoDBTestCase):
         # _marshall_aggregate() puts all fields and obj_extra_fields in the
         # top-level dict, so we need to put availability_zone at the top also
         agg['availability_zone'] = 'nova'
+        del agg['uuid']
         self.assertEqual(agg, marshalled_agg['aggregate'])
 
     def _assert_agg_data(self, expected, actual):
@@ -20,6 +20,7 @@ from nova import exception
 from nova.objects import aggregate
 from nova.tests.unit import fake_notifier
 from nova.tests.unit.objects import test_objects
+from nova.tests import uuidsentinel
 
 
 NOW = timeutils.utcnow().replace(microsecond=0)
@@ -29,6 +30,7 @@ fake_aggregate = {
     'deleted_at': None,
     'deleted': False,
     'id': 123,
+    'uuid': uuidsentinel.fake_aggregate,
     'name': 'fake-aggregate',
     'hosts': ['foo', 'bar'],
     'metadetails': {'this': 'that'},
@@ -45,25 +47,43 @@ class _TestAggregateObject(object):
         agg = aggregate.Aggregate.get_by_id(self.context, 123)
         self.compare_obj(agg, fake_aggregate, subs=SUBS)
 
+    @mock.patch('nova.objects.Aggregate.save')
+    @mock.patch('nova.db.aggregate_get')
+    def test_load_allocates_uuid(self, mock_get, mock_save):
+        fake_agg = dict(fake_aggregate)
+        del fake_agg['uuid']
+        mock_get.return_value = fake_agg
+        uuid = uuidsentinel.aggregate
+        with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_g:
+            mock_g.return_value = uuid
+            obj = aggregate.Aggregate.get_by_id(self.context, 123)
+            mock_g.assert_called_once_with()
+            self.assertEqual(uuid, obj.uuid)
+        mock_save.assert_called_once_with()
+
     def test_create(self):
         self.mox.StubOutWithMock(db, 'aggregate_create')
-        db.aggregate_create(self.context, {'name': 'foo'},
+        db.aggregate_create(self.context, {'name': 'foo',
+                                           'uuid': uuidsentinel.fake_agg},
                             metadata={'one': 'two'}).AndReturn(fake_aggregate)
         self.mox.ReplayAll()
         agg = aggregate.Aggregate(context=self.context)
         agg.name = 'foo'
         agg.metadata = {'one': 'two'}
+        agg.uuid = uuidsentinel.fake_agg
         agg.create()
         self.compare_obj(agg, fake_aggregate, subs=SUBS)
 
     def test_recreate_fails(self):
         self.mox.StubOutWithMock(db, 'aggregate_create')
-        db.aggregate_create(self.context, {'name': 'foo'},
+        db.aggregate_create(self.context, {'name': 'foo',
+                                           'uuid': uuidsentinel.fake_agg},
                             metadata={'one': 'two'}).AndReturn(fake_aggregate)
         self.mox.ReplayAll()
         agg = aggregate.Aggregate(context=self.context)
         agg.name = 'foo'
         agg.metadata = {'one': 'two'}
+        agg.uuid = uuidsentinel.fake_agg
         agg.create()
         self.assertRaises(exception.ObjectActionError, agg.create)
 
@@ -1099,7 +1099,7 @@ class TestRegistry(test.NoDBTestCase):
 object_data = {
     'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
     'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c',
-    'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
+    'Aggregate': '1.2-fe9d8c93feb37919753e9e44fe6818a7',
     'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa',
     'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
     'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
@@ -0,0 +1,7 @@
+---
+upgrade:
+  - Upon first startup of the scheduler service in Mitaka, all defined
+    aggregates will have UUIDs generated and saved back to the
+    database. If you have a significant number of aggregates, this may
+    delay scheduler start as that work is completed, but it should be
+    minor for most deployments.
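A hedged illustration of what the upgrade note implies: loading every aggregate once is enough to backfill uuids, because _from_db_object() now generates and saves a uuid for any row missing one. AggregateList.get_all is Nova's real API, but calling it at service start as a backfill step is shown here only to explain the note, not as the actual scheduler code.

from nova import objects


def backfill_aggregate_uuids(context):
    # Each aggregate lacking a uuid gets one generated and persisted
    # as a side effect of being loaded by _from_db_object().
    aggregates = objects.AggregateList.get_all(context)
    return [agg.uuid for agg in aggregates]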