placement: Allocation.consumer field

Removes the consumer_id, project_id and user_id fields from the
Allocation object definition. These values are now found in the
Consumer object that is embedded in the Allocation object via the new
non-nullable 'consumer' field.
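
In practice this means callers build a Consumer (carrying Project and
User) and attach it to the Allocation instead of passing raw IDs. A
minimal before/after sketch, assuming a RequestContext ctx, an existing
ResourceProvider rp and illustrative external IDs; it mirrors the
handler and test-fixture changes further down in this diff rather than
adding any new API:

    # Before this patch: consumer/project/user were plain fields.
    alloc = rp_obj.Allocation(
        ctx, resource_provider=rp, consumer_id=consumer_uuid,
        project_id='fake-project', user_id='fake-user',
        resource_class='VCPU', used=2)

    # After this patch: a non-nullable Consumer object is embedded
    # instead, and it carries the Project and User objects.
    project = project_obj.Project(ctx, external_id='fake-project')
    project.create()
    user = user_obj.User(ctx, external_id='fake-user')
    user.create()
    consumer = consumer_obj.Consumer(
        ctx, uuid=consumer_uuid, project=project, user=user)
    consumer.create()

    alloc = rp_obj.Allocation(
        ctx, resource_provider=rp, consumer=consumer,
        resource_class='VCPU', used=2)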

Modifies the serialization in the allocation handler to output
Allocation.consumer.project.external_id and
Allocation.consumer.user.external_id when appropriate for the
microversion (1.12 and later).
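
Concretely, at microversion 1.12 and later the project_id and user_id
keys in the allocations response are now sourced from the embedded
Consumer rather than from the removed Allocation fields. An
illustrative, abbreviated response body for
GET /allocations/{consumer_uuid}, with invented values:

    {
        "allocations": {
            "30c9e8e3-16e0-4a38-b3a4-9ee39d5f5b80": {
                "resources": {"DISK_GB": 500}
            }
        },
        # taken from Allocation.consumer.project.external_id
        "project_id": "fake-project",
        # taken from Allocation.consumer.user.external_id
        "user_id": "fake-user"
    }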

Calls the create_incomplete_consumers() method during
AllocationList.get_all_by_consumer_id() and
AllocationList.get_all_by_resource_provider() to online-migrate missing
consumer records.
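
The net effect is a "really online" migration: simply reading
allocations repairs any consumer record that is still missing, pointing
it at the "incomplete consumer" project and user from CONF. A condensed
sketch of the flow exercised by CreateIncompleteConsumersTestCase near
the end of this diff (ctx, still_missing_uuid and rp1 are assumed to
come from that test's setup):

    # Batch path (what the nova-manage online_data_migration tool
    # drives), limited here to one record; returns (found, migrated).
    assert consumer_obj.create_incomplete_consumers(ctx, 1) == (1, 1)

    # On-read paths: each lookup creates the missing consumers row
    # inline before querying the allocations.
    rp_obj.AllocationList.get_all_by_consumer_id(ctx, still_missing_uuid)
    rp_obj.AllocationList.get_all_by_resource_provider(ctx, rp1)

    # Nothing left for the batch migration to do afterwards.
    assert consumer_obj.create_incomplete_consumers(ctx, 10) == (0, 0)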

Change-Id: Icae5038190ab8c7bbdb38d54ae909fcbf9048912
blueprint: add-consumer-generation
Authored by Jay Pipes on 2018-04-30 19:30:39 -04:00, committed by Matt Riedemann
parent 1c36b645e1
commit f449650109
11 changed files with 468 additions and 376 deletions


@ -65,8 +65,11 @@ def _allocations_dict(allocations, key_fetcher, resource_provider=None,
if allocations and want_version and want_version.matches((1, 12)):
# We're looking at a list of allocations by consumer id so
# project and user are consistent across the list
result['project_id'] = allocations[0].project_id
result['user_id'] = allocations[0].user_id
project_id = allocations[0].consumer.project.external_id
user_id = allocations[0].consumer.user.external_id
result['project_id'] = project_id
result['user_id'] = user_id
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return result, last_modified
@ -122,7 +125,7 @@ def _serialize_allocations_for_resource_provider(allocations,
}
}
"""
return _allocations_dict(allocations, lambda x: x.consumer_id,
return _allocations_dict(allocations, lambda x: x.consumer.uuid,
resource_provider=resource_provider)
@ -217,14 +220,13 @@ def _new_allocations(context, resource_provider_uuid, consumer_uuid,
_("Allocation for resource provider '%(rp_uuid)s' "
"that does not exist.") %
{'rp_uuid': resource_provider_uuid})
util.ensure_consumer(context, consumer_uuid, project_id, user_id)
consumer = util.ensure_consumer(
context, consumer_uuid, project_id, user_id)
for resource_class in resources:
allocation = rp_obj.Allocation(
resource_provider=resource_provider,
consumer_id=consumer_uuid,
consumer=consumer,
resource_class=resource_class,
project_id=project_id,
user_id=user_id,
used=resources[resource_class])
allocations.append(allocation)
return allocations


@ -17,12 +17,12 @@ import sqlalchemy as sa
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models as models
CONSUMER_TBL = models.Consumer.__table__
_ALLOC_TBL = models.Allocation.__table__
@db_api.api_context_manager.writer
@ -45,10 +45,10 @@ def create_incomplete_consumers(ctx, batch_size):
# allocations.consumer_id doesn't exist in the consumers table. Use the
# incomplete consumer project and user ID.
alloc_to_consumer = sa.outerjoin(
rp_obj._ALLOC_TBL, CONSUMER_TBL,
rp_obj._ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
_ALLOC_TBL, CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
cols = [
rp_obj._ALLOC_TBL.c.consumer_id,
_ALLOC_TBL.c.consumer_id,
incomplete_proj_id,
incomplete_user_id,
]


@ -38,6 +38,9 @@ from sqlalchemy import sql
from sqlalchemy.sql import null
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.db.sqlalchemy import api as db_api
from nova.db.sqlalchemy import api_models as models
from nova.db.sqlalchemy import resource_class_cache as rc_cache
@ -1536,55 +1539,11 @@ class Allocation(base.VersionedObject, base.TimestampedObject):
fields = {
'id': fields.IntegerField(),
'resource_provider': fields.ObjectField('ResourceProvider'),
'consumer_id': fields.UUIDField(),
'consumer': fields.ObjectField('Consumer', nullable=False),
'resource_class': rc_fields.ResourceClassField(),
'used': fields.IntegerField(),
# The following two fields are allowed to be set to None to
# support Allocations that were created before the fields were
# required.
'project_id': fields.StringField(nullable=True),
'user_id': fields.StringField(nullable=True),
}
def ensure_consumer_project_user(self, ctx):
"""Examines the project_id, user_id of the object along with the
supplied consumer_id and ensures that if project_id and user_id
are set that there are records in the consumers, projects, and
users table for these entities.
:param ctx: `nova.context.RequestContext` object that has the oslo.db
Session object in it
"""
# If project_id and user_id are not set then create a consumer record
# pointing to the incomplete consumer project and user ID.
# This allows microversion <1.8 to continue to work. Since then the
# fields are required and the enforcement is at the HTTP API layer.
if 'project_id' not in self or self.project_id is None:
self.project_id = CONF.placement.incomplete_consumer_project_id
if 'user_id' not in self or self.user_id is None:
self.user_id = CONF.placement.incomplete_consumer_user_id
# Grab the project internal ID if it exists in the projects table
pid = _ensure_project(ctx, self.project_id)
# Grab the user internal ID if it exists in the users table
uid = _ensure_user(ctx, self.user_id)
# Add the consumer if it doesn't already exist
sel_stmt = sa.select([_CONSUMER_TBL.c.uuid]).where(
_CONSUMER_TBL.c.uuid == self.consumer_id)
result = ctx.session.execute(sel_stmt).fetchall()
if not result:
try:
ctx.session.execute(_CONSUMER_TBL.insert().values(
uuid=self.consumer_id,
project_id=pid,
user_id=uid))
except db_exc.DBDuplicateEntry:
# We assume at this time that a consumer project/user can't
# change, so if we get here, we raced and should just pass
# if the consumer already exists.
pass
@db_api.api_context_manager.writer
def _delete_allocations_for_consumer(ctx, consumer_id):
@ -1756,69 +1715,34 @@ def _check_capacity_exceeded(ctx, allocs):
return res_providers
def _ensure_lookup_table_entry(ctx, tbl, external_id):
"""Ensures the supplied external ID exists in the specified lookup table
and if not, adds it. Returns the internal ID.
:param ctx: `nova.context.RequestContext` object that has the oslo.db
Session object in it
:param tbl: The lookup table
:param external_id: The external project or user identifier
:type external_id: string
"""
# Grab the project internal ID if it exists in the projects table
sel = sa.select([tbl.c.id]).where(
tbl.c.external_id == external_id
)
res = ctx.session.execute(sel).fetchall()
if not res:
try:
ins_stmt = tbl.insert().values(external_id=external_id)
res = ctx.session.execute(ins_stmt)
return res.inserted_primary_key[0]
except db_exc.DBDuplicateEntry:
# Another thread added it just before us, so just read the
# internal ID that that thread created...
res = ctx.session.execute(sel).fetchall()
return res[0][0]
def _ensure_project(ctx, external_id):
"""Ensures the supplied external project ID exists in the projects lookup
table and if not, adds it. Returns the internal project ID.
:param ctx: `nova.context.RequestContext` object that has the oslo.db
Session object in it
:param external_id: The external project identifier
:type external_id: string
"""
return _ensure_lookup_table_entry(ctx, _PROJECT_TBL, external_id)
def _ensure_user(ctx, external_id):
"""Ensures the supplied external user ID exists in the users lookup table
and if not, adds it. Returns the internal user ID.
:param ctx: `nova.context.RequestContext` object that has the oslo.db
Session object in it
:param external_id: The external user identifier
:type external_id: string
"""
return _ensure_lookup_table_entry(ctx, _USER_TBL, external_id)
@db_api.api_context_manager.reader
def _get_allocations_by_provider_id(ctx, rp_id):
allocs = sa.alias(_ALLOC_TBL, name="a")
consumers = sa.alias(_CONSUMER_TBL, name="c")
projects = sa.alias(_PROJECT_TBL, name="p")
users = sa.alias(_PROJECT_TBL, name="u")
cols = [
allocs.c.resource_class_id,
allocs.c.consumer_id,
allocs.c.used,
allocs.c.updated_at,
allocs.c.created_at
allocs.c.created_at,
consumers.c.id.label("consumer_id"),
consumers.c.generation.label("consumer_generation"),
sql.func.coalesce(
consumers.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
projects.c.id.label("project_id"),
projects.c.external_id.label("project_external_id"),
users.c.id.label("user_id"),
users.c.external_id.label("user_external_id"),
]
sel = sa.select(cols)
# TODO(jaypipes): change this join to be on ID not UUID
consumers_join = sa.join(
allocs, consumers, allocs.c.consumer_id == consumers.c.uuid)
projects_join = sa.join(
consumers_join, projects, consumers.c.project_id == projects.c.id)
users_join = sa.join(
projects_join, users, consumers.c.user_id == users.c.id)
sel = sa.select(cols).select_from(users_join)
sel = sel.where(allocs.c.resource_provider_id == rp_id)
return [dict(r) for r in ctx.session.execute(sel)]
@ -1837,19 +1761,24 @@ def _get_allocations_by_consumer_uuid(ctx, consumer_uuid):
rp.c.uuid.label("resource_provider_uuid"),
rp.c.generation.label("resource_provider_generation"),
allocs.c.resource_class_id,
allocs.c.consumer_id,
allocs.c.used,
project.c.external_id.label("project_id"),
user.c.external_id.label("user_id"),
consumer.c.id.label("consumer_id"),
consumer.c.generation.label("consumer_generation"),
sql.func.coalesce(
consumer.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
project.c.id.label("project_id"),
project.c.external_id.label("project_external_id"),
user.c.id.label("user_id"),
user.c.external_id.label("user_external_id"),
]
# Build up the joins of the five tables we need to interact with.
rp_join = sa.join(allocs, rp, allocs.c.resource_provider_id == rp.c.id)
consumer_join = sa.outerjoin(rp_join, consumer,
allocs.c.consumer_id == consumer.c.uuid)
project_join = sa.outerjoin(consumer_join, project,
consumer.c.project_id == project.c.id)
user_join = sa.outerjoin(project_join, user,
consumer.c.user_id == user.c.id)
consumer_join = sa.join(rp_join, consumer,
allocs.c.consumer_id == consumer.c.uuid)
project_join = sa.join(consumer_join, project,
consumer.c.project_id == project.c.id)
user_join = sa.join(project_join, user,
consumer.c.user_id == user.c.id)
sel = sa.select(cols).select_from(user_join)
sel = sel.where(allocs.c.consumer_id == consumer_uuid)
@ -1857,6 +1786,82 @@ def _get_allocations_by_consumer_uuid(ctx, consumer_uuid):
return [dict(r) for r in ctx.session.execute(sel)]
@db_api.api_context_manager.writer.independent
def _create_incomplete_consumers_for_provider(ctx, rp_id):
# TODO(jaypipes): Remove in Stein after a blocker migration is added.
"""Creates consumer record if consumer relationship between allocations ->
consumers table is missing for any allocation on the supplied provider
internal ID, using the "incomplete consumer" project and user CONF options.
"""
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, consumer_obj.CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
sel = sa.select([_ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
missing = ctx.session.execute(sel).fetchall()
if missing:
# Do a single INSERT for all missing consumer relationships for the
# provider
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
cols = [
_ALLOC_TBL.c.consumer_id,
incomplete_proj_id,
incomplete_user_id,
]
sel = sa.select(cols)
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
target_cols = ['uuid', 'project_id', 'user_id']
ins_stmt = consumer_obj.CONSUMER_TBL.insert().from_select(
target_cols, sel)
res = ctx.session.execute(ins_stmt)
if res.rowcount > 0:
LOG.info("Online data migration to fix incomplete consumers "
"for resource provider %s has been run. Migrated %d "
"incomplete consumer records on the fly.", rp_id,
res.rowcount)
@db_api.api_context_manager.writer.independent
def _create_incomplete_consumer(ctx, consumer_id):
# TODO(jaypipes): Remove in Stein after a blocker migration is added.
"""Creates consumer record if consumer relationship between allocations ->
consumers table is missing for the supplied consumer UUID, using the
"incomplete consumer" project and user CONF options.
"""
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, consumer_obj.CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
sel = sa.select([_ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.consumer_id == consumer_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
missing = ctx.session.execute(sel).fetchall()
if missing:
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
ins_stmt = consumer_obj.CONSUMER_TBL.insert().values(
uuid=consumer_id, project_id=incomplete_proj_id,
user_id=incomplete_user_id)
res = ctx.session.execute(ins_stmt)
if res.rowcount > 0:
LOG.info("Online data migration to fix incomplete consumers "
"for consumer %s has been run. Migrated %d incomplete "
"consumer records on the fly.", consumer_id, res.rowcount)
@base.VersionedObjectRegistry.register_if(False)
class AllocationList(base.ObjectListBase, base.VersionedObject):
@ -1891,7 +1896,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
# First delete any existing allocations for any consumers. This
# provides a clean slate for the consumers mentioned in the list of
# allocations being manipulated.
consumer_ids = set(alloc.consumer_id for alloc in allocs)
consumer_ids = set(alloc.consumer.uuid for alloc in allocs)
for consumer_id in consumer_ids:
_delete_allocations_for_consumer(context, consumer_id)
@ -1917,7 +1922,6 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
visited_rps = _check_capacity_exceeded(context,
[alloc for alloc in
allocs if alloc.used > 0])
seen_consumers = set()
for alloc in allocs:
# If alloc.used is set to zero that is a signal that we don't want
# to (re-)create any allocations for this resource class.
@ -1929,12 +1933,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
rp = alloc.resource_provider
visited_rps[rp.uuid] = rp
continue
consumer_id = alloc.consumer_id
# Only set consumer <-> project/user association if we haven't set
# it already.
if consumer_id not in seen_consumers:
alloc.ensure_consumer_project_user(context)
seen_consumers.add(consumer_id)
consumer_id = alloc.consumer.uuid
rp = alloc.resource_provider
rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
ins_stmt = _ALLOC_TBL.insert().values(
@ -1955,27 +1954,56 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
@classmethod
def get_all_by_resource_provider(cls, context, rp):
_ensure_rc_cache(context)
_create_incomplete_consumers_for_provider(context, rp.id)
db_allocs = _get_allocations_by_provider_id(context, rp.id)
# Build up a list of Allocation objects, setting the Allocation object
# fields to the same-named database record field we got from
# _get_allocations_by_provider_id(). We already have the
# ResourceProvider object so we just pass that object to the Allocation
# object constructor as-is
objs = [
Allocation(
context, resource_provider=rp,
resource_class=_RC_CACHE.string_from_id(
rec['resource_class_id']),
**rec)
for rec in db_allocs
]
objs = []
for rec in db_allocs:
consumer = consumer_obj.Consumer(
context, id=rec['consumer_id'],
uuid=rec['consumer_uuid'],
generation=rec['consumer_generation'],
project=project_obj.Project(
context, id=rec['project_id'],
external_id=rec['project_external_id']),
user=user_obj.User(
context, id=rec['user_id'],
external_id=rec['user_external_id']))
objs.append(
Allocation(
context, resource_provider=rp,
resource_class=_RC_CACHE.string_from_id(
rec['resource_class_id']),
consumer=consumer,
used=rec['used']))
alloc_list = cls(context, objects=objs)
return alloc_list
@classmethod
def get_all_by_consumer_id(cls, context, consumer_id):
_ensure_rc_cache(context)
_create_incomplete_consumer(context, consumer_id)
db_allocs = _get_allocations_by_consumer_uuid(context, consumer_id)
if db_allocs:
# Build up the Consumer object (it's the same for all allocations
# since we looked up by consumer ID)
db_first = db_allocs[0]
consumer = consumer_obj.Consumer(
context, id=db_first['consumer_id'],
uuid=db_first['consumer_uuid'],
generation=db_first['consumer_generation'],
project=project_obj.Project(
context, id=db_first['project_id'],
external_id=db_first['project_external_id']),
user=user_obj.User(
context, id=db_first['user_id'],
external_id=db_first['user_external_id']))
# Build up a list of Allocation objects, setting the Allocation object
# fields to the same-named database record field we got from
# _get_allocations_by_consumer_id().
@ -1994,7 +2022,8 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
generation=rec['resource_provider_generation']),
resource_class=_RC_CACHE.string_from_id(
rec['resource_class_id']),
**rec)
consumer=consumer,
used=rec['used'])
for rec in db_allocs
]
alloc_list = cls(context, objects=objs)
@ -2014,7 +2043,7 @@ class AllocationList(base.ObjectListBase, base.VersionedObject):
def delete_all(self):
# Allocations can only have a single consumer, so take advantage of
# that fact and do an efficient batch delete
consumer_uuid = self.objects[0].consumer_id
consumer_uuid = self.objects[0].consumer.uuid
_delete_allocations_for_consumer(self._context, consumer_uuid)
def __repr__(self):


@ -93,44 +93,44 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
# Inventory consumed in one RC, but available in the others
excl_1invunavail = self._create_provider('1invunavail')
tb.add_inventory(excl_1invunavail, fields.ResourceClass.VCPU, 10)
tb.allocate_from_provider(excl_1invunavail, fields.ResourceClass.VCPU,
7)
self.allocate_from_provider(
excl_1invunavail, fields.ResourceClass.VCPU, 7)
tb.add_inventory(excl_1invunavail, fields.ResourceClass.MEMORY_MB,
4096)
tb.allocate_from_provider(excl_1invunavail,
self.allocate_from_provider(excl_1invunavail,
fields.ResourceClass.MEMORY_MB, 1024)
tb.add_inventory(excl_1invunavail, fields.ResourceClass.DISK_GB, 2000)
tb.allocate_from_provider(excl_1invunavail,
self.allocate_from_provider(excl_1invunavail,
fields.ResourceClass.DISK_GB, 400)
# Inventory all consumed
excl_allused = self._create_provider('allused')
tb.add_inventory(excl_allused, fields.ResourceClass.VCPU, 10)
tb.allocate_from_provider(excl_allused, fields.ResourceClass.VCPU, 7)
self.allocate_from_provider(excl_allused, fields.ResourceClass.VCPU, 7)
tb.add_inventory(excl_allused, fields.ResourceClass.MEMORY_MB, 4000)
tb.allocate_from_provider(excl_allused,
self.allocate_from_provider(excl_allused,
fields.ResourceClass.MEMORY_MB, 1500)
tb.allocate_from_provider(excl_allused,
self.allocate_from_provider(excl_allused,
fields.ResourceClass.MEMORY_MB, 2000)
tb.add_inventory(excl_allused, fields.ResourceClass.DISK_GB, 1500)
tb.allocate_from_provider(excl_allused, fields.ResourceClass.DISK_GB,
self.allocate_from_provider(excl_allused, fields.ResourceClass.DISK_GB,
1)
# Inventory available in requested classes, but unavailable in others
incl_extra_full = self._create_provider('extra_full')
tb.add_inventory(incl_extra_full, fields.ResourceClass.VCPU, 20)
tb.allocate_from_provider(incl_extra_full, fields.ResourceClass.VCPU,
self.allocate_from_provider(incl_extra_full, fields.ResourceClass.VCPU,
15)
tb.add_inventory(incl_extra_full, fields.ResourceClass.MEMORY_MB, 4096)
tb.allocate_from_provider(incl_extra_full,
self.allocate_from_provider(incl_extra_full,
fields.ResourceClass.MEMORY_MB, 1024)
tb.add_inventory(incl_extra_full, fields.ResourceClass.DISK_GB, 2000)
tb.allocate_from_provider(incl_extra_full,
self.allocate_from_provider(incl_extra_full,
fields.ResourceClass.DISK_GB, 400)
tb.add_inventory(incl_extra_full, fields.ResourceClass.PCI_DEVICE, 4)
tb.allocate_from_provider(incl_extra_full,
self.allocate_from_provider(incl_extra_full,
fields.ResourceClass.PCI_DEVICE, 1)
tb.allocate_from_provider(incl_extra_full,
self.allocate_from_provider(incl_extra_full,
fields.ResourceClass.PCI_DEVICE, 3)
# Inventory available in a unrequested classes, not in requested ones
@ -141,7 +141,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
# Not enough left after reserved + used
tb.add_inventory(excl_extra_avail, fields.ResourceClass.MEMORY_MB,
4096, max_unit=2048, reserved=2048)
tb.allocate_from_provider(excl_extra_avail,
self.allocate_from_provider(excl_extra_avail,
fields.ResourceClass.MEMORY_MB, 1040)
# Allocation ratio math
tb.add_inventory(excl_extra_avail, fields.ResourceClass.DISK_GB, 2000,
@ -151,7 +151,7 @@ class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
custom_special = rp_obj.ResourceClass(self.ctx, name='CUSTOM_SPECIAL')
custom_special.create()
tb.add_inventory(excl_extra_avail, 'CUSTOM_SPECIAL', 100)
tb.allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99)
self.allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99)
resources = {
fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 5,
@ -1928,7 +1928,7 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# function (the one with HW_NIC_OFFLOAD_GENEVE associated with it) and
# verify that the same request still results in 0 results since the
# function with the required trait no longer has any inventory.
tb.allocate_from_provider(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
self.allocate_from_provider(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
alloc_cands = self._get_allocation_candidates(
{'':
@ -2045,12 +2045,12 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# _get_trees_matching_all()
cn2_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn2_numa0_pf0)
tb.allocate_from_provider(cn2_pf0, fields.ResourceClass.SRIOV_NET_VF,
self.allocate_from_provider(cn2_pf0, fields.ResourceClass.SRIOV_NET_VF,
8)
cn2_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn2_numa1_pf1)
tb.allocate_from_provider(cn2_pf1, fields.ResourceClass.SRIOV_NET_VF,
self.allocate_from_provider(cn2_pf1, fields.ResourceClass.SRIOV_NET_VF,
8)
trees = rp_obj._get_trees_matching_all(self.ctx,
@ -2152,21 +2152,21 @@ class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
# no more providers are returned
cn1_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn1_numa0_pf0)
tb.allocate_from_provider(
self.allocate_from_provider(
cn1_pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
cn1_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn1_numa1_pf1)
tb.allocate_from_provider(
self.allocate_from_provider(
cn1_pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
cn3_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn3_numa0_pf0)
tb.allocate_from_provider(
self.allocate_from_provider(
cn3_pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
cn3_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx,
uuids.cn3_numa1_pf1)
tb.allocate_from_provider(
self.allocate_from_provider(
cn3_pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
trees = rp_obj._get_trees_matching_all(self.ctx,


@ -15,7 +15,10 @@
from oslo_utils import uuidutils
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova import context
from nova import test
from nova.tests import fixtures
@ -44,20 +47,6 @@ def set_traits(rp, *traits):
return tlist
def allocate_from_provider(rp, rc, used, consumer_id=None):
# NOTE(efried): If not specified, use a random consumer UUID - we don't
# want to override any existing allocations from the test case.
consumer_id = consumer_id or uuidutils.generate_uuid()
alloc_list = rp_obj.AllocationList(
rp._context, objects=[
rp_obj.Allocation(
rp._context, resource_provider=rp, resource_class=rc,
consumer_id=consumer_id, used=used)]
)
alloc_list.create_all()
return alloc_list
class PlacementDbBaseTestCase(test.NoDBTestCase):
USES_DB_SELF = True
@ -71,6 +60,11 @@ class PlacementDbBaseTestCase(test.NoDBTestCase):
self._reset_traits_synced()
self.addCleanup(self._reset_traits_synced)
self.ctx = context.RequestContext('fake-user', 'fake-project')
self.user_obj = user_obj.User(self.ctx, external_id='fake-user')
self.user_obj.create()
self.project_obj = project_obj.Project(
self.ctx, external_id='fake-project')
self.project_obj.create()
# For debugging purposes, populated by _create_provider and used by
# _validate_allocation_requests to make failure results more readable.
self.rp_uuid_to_name = {}
@ -95,14 +89,45 @@ class PlacementDbBaseTestCase(test.NoDBTestCase):
self.rp_uuid_to_name[rp.uuid] = name
return rp
def allocate_from_provider(self, rp, rc, used, consumer_id=None,
consumer=None):
# NOTE(efried): If not specified, use a random consumer UUID - we don't
# want to override any existing allocations from the test case.
consumer_id = consumer_id or uuidutils.generate_uuid()
if consumer is None:
try:
consumer = consumer_obj.Consumer.get_by_uuid(
self.ctx, consumer_id)
except exception.NotFound:
consumer = consumer_obj.Consumer(
self.ctx, uuid=consumer_id, user=self.user_obj,
project=self.project_obj)
consumer.create()
alloc_list = rp_obj.AllocationList(
self.ctx, objects=[
rp_obj.Allocation(
self.ctx, resource_provider=rp, resource_class=rc,
consumer=consumer, used=used)]
)
alloc_list.create_all()
return alloc_list
def _make_allocation(self, inv_dict, alloc_dict):
rp = self._create_provider('allocation_resource_provider')
disk_inv = rp_obj.Inventory(context=self.ctx,
resource_provider=rp, **inv_dict)
inv_list = rp_obj.InventoryList(objects=[disk_inv])
rp.set_inventory(inv_list)
consumer_id = alloc_dict['consumer_id']
try:
c = consumer_obj.Consumer.get_by_uuid(self.ctx, consumer_id)
except exception.NotFound:
c = consumer_obj.Consumer(
self.ctx, uuid=consumer_id, user=self.user_obj,
project=self.project_obj)
c.create()
alloc = rp_obj.Allocation(self.ctx, resource_provider=rp,
**alloc_dict)
consumer=c, **alloc_dict)
alloc_list = rp_obj.AllocationList(self.ctx, objects=[alloc])
alloc_list.create_all()
return rp, alloc


@ -18,7 +18,10 @@ from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova import context
from nova.db.sqlalchemy import api as db_api
from nova import test
from nova.tests import fixtures
from nova.tests.functional.api.openstack.placement.db import test_base as tb
from nova.tests import uuidsentinel as uuids
@ -36,21 +39,45 @@ class ConsumerTestCase(tb.PlacementDbBaseTestCase):
uuids.non_existing_consumer)
def test_create_and_get(self):
u = user_obj.User(self.ctx, external_id='fake-user')
u = user_obj.User(self.ctx, external_id='another-user')
u.create()
p = project_obj.Project(self.ctx, external_id='fake-project')
p = project_obj.Project(self.ctx, external_id='another-project')
p.create()
c = consumer_obj.Consumer(
self.ctx, uuid=uuids.consumer, user=u, project=p)
c.create()
c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer)
self.assertEqual(1, c.id)
self.assertEqual(1, c.project.id)
self.assertEqual(1, c.user.id)
# Project ID == 1 is fake-project created in setup
self.assertEqual(2, c.project.id)
# User ID == 1 is fake-user created in setup
self.assertEqual(2, c.user.id)
self.assertRaises(exception.ConsumerExists, c.create)
class CreateIncompleteConsumersTestCase(tb.PlacementDbBaseTestCase):
@db_api.api_context_manager.reader
def _get_allocs_with_no_consumer_relationship(ctx):
alloc_to_consumer = sa.outerjoin(
ALLOC_TBL, CONSUMER_TBL,
ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
sel = sa.select([ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(CONSUMER_TBL.c.id.is_(None))
return ctx.session.execute(sel).fetchall()
# NOTE(jaypipes): The tb.PlacementDbBaseTestCase creates a project and user
# which is why we don't base off that. We want a completely bare DB for this
# test.
class CreateIncompleteConsumersTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(CreateIncompleteConsumersTestCase, self).setUp()
self.useFixture(fixtures.Database())
self.api_db = self.useFixture(fixtures.Database(database='api'))
self.ctx = context.RequestContext('fake-user', 'fake-project')
@db_api.api_context_manager.writer
def _create_incomplete_allocations(self, ctx):
# Create some allocations with consumers that don't exist in the
@ -59,14 +86,12 @@ class CreateIncompleteConsumersTestCase(tb.PlacementDbBaseTestCase):
# project/user records.
c1_missing_uuid = uuids.c1_missing
c2_missing_uuid = uuids.c2_missing
ins_stmt = ALLOC_TBL.insert().values(
resource_provider_id=1, resource_class_id=0,
consumer_id=c1_missing_uuid, used=1)
ctx.session.execute(ins_stmt)
ins_stmt = ALLOC_TBL.insert().values(
resource_provider_id=1, resource_class_id=0,
consumer_id=c2_missing_uuid, used=1)
ctx.session.execute(ins_stmt)
c3_missing_uuid = uuids.c3_missing
for c_uuid in (c1_missing_uuid, c2_missing_uuid, c3_missing_uuid):
ins_stmt = ALLOC_TBL.insert().values(
resource_provider_id=1, resource_class_id=0,
consumer_id=c_uuid, used=1)
ctx.session.execute(ins_stmt)
# Verify there are no records in the projects/users table
project_count = ctx.session.scalar(
sa.select([sa.func.count('*')]).select_from(PROJECT_TBL))
@ -112,13 +137,7 @@ class CreateIncompleteConsumersTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(incomplete_user_id, missing_c2['user_id'])
# Ensure there are no more allocations with incomplete consumers
alloc_to_consumer = sa.outerjoin(
ALLOC_TBL, CONSUMER_TBL,
ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
sel = sa.select([ALLOC_TBL])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(CONSUMER_TBL.c.id.is_(None))
res = ctx.session.execute(sel).fetchall()
res = _get_allocs_with_no_consumer_relationship(ctx)
self.assertEqual(0, len(res))
def test_create_incomplete_consumers(self):
@ -126,8 +145,47 @@ class CreateIncompleteConsumersTestCase(tb.PlacementDbBaseTestCase):
records along with the incomplete consumer project/user records.
"""
self._create_incomplete_allocations(self.ctx)
res = consumer_obj.create_incomplete_consumers(self.ctx, 10)
self.assertEqual((2, 2), res)
# We do a "really online" online data migration for incomplete
# consumers when calling AllocationList.get_all_by_consumer_id() and
# AllocationList.get_all_by_resource_provider() and there are still
# incomplete consumer records. So, to simulate a situation where the
# operator has yet to run the nova-manage online_data_migration CLI
# tool completely, we first call
# consumer_obj.create_incomplete_consumers() with a batch size of 1.
# This should mean there will be two allocation records still remaining
# with a missing consumer record (since we create 3 total to begin
# with). We then query the allocations table directly to grab that
# consumer UUID in the allocations table that doesn't refer to a
# consumer table record and call
# AllocationList.get_all_by_consumer_id() with that consumer UUID. This
# should create the remaining missing consumer record "inline" in the
# AllocationList.get_all_by_consumer_id() method.
# After that happens, there should still be a single allocation record
# that is missing a relation to the consumers table. We call the
# AllocationList.get_all_by_resource_provider() method and verify that
# method cleans up the remaining incomplete consumers relationship.
res = consumer_obj.create_incomplete_consumers(self.ctx, 1)
self.assertEqual((1, 1), res)
# Grab the consumer UUID for the allocation record with a
# still-incomplete consumer record.
res = _get_allocs_with_no_consumer_relationship(self.ctx)
self.assertEqual(2, len(res))
still_missing = res[0][0]
rp_obj.AllocationList.get_all_by_consumer_id(self.ctx, still_missing)
# There should still be a single missing consumer relationship. Let's
# grab that and call AllocationList.get_all_by_resource_provider()
# which should clean that last one up for us.
res = _get_allocs_with_no_consumer_relationship(self.ctx)
self.assertEqual(1, len(res))
still_missing = res[0][0]
rp1 = rp_obj.ResourceProvider(self.ctx, id=1)
rp_obj.AllocationList.get_all_by_resource_provider(self.ctx, rp1)
# get_all_by_resource_provider() should have auto-completed the still
# missing consumer record and _check_incomplete_consumers() should
# assert correctly that there are no more incomplete consumer records.
self._check_incomplete_consumers(self.ctx)
res = consumer_obj.create_incomplete_consumers(self.ctx, 10)
self.assertEqual((0, 0), res)


@ -23,8 +23,9 @@ class ProjectTestCase(tb.PlacementDbBaseTestCase):
self.ctx, uuids.non_existing_project)
def test_create_and_get(self):
p = project_obj.Project(self.ctx, external_id='fake-project')
p = project_obj.Project(self.ctx, external_id='another-project')
p.create()
p = project_obj.Project.get_by_external_id(self.ctx, 'fake-project')
self.assertEqual(1, p.id)
p = project_obj.Project.get_by_external_id(self.ctx, 'another-project')
# Project ID == 1 is fake-project created in setup
self.assertEqual(2, p.id)
self.assertRaises(exception.ProjectExists, p.create)


@ -18,6 +18,7 @@ import sqlalchemy as sa
import nova
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.db.sqlalchemy import api_models as models
from nova import rc_fields as fields
@ -359,7 +360,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.assertEqual(1, len(rps))
self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid)
alloc_list = tb.allocate_from_provider(
alloc_list = self.allocate_from_provider(
grandchild_rp, fields.ResourceClass.VCPU, 1)
self.assertRaises(exception.CannotDeleteParentResourceProvider,
@ -533,7 +534,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
"""
rp = self._create_provider('compute-host')
tb.add_inventory(rp, 'VCPU', 12)
tb.allocate_from_provider(rp, 'VCPU', 1)
self.allocate_from_provider(rp, 'VCPU', 1)
inv = rp_obj.Inventory(
resource_provider=rp,
@ -566,7 +567,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
self.assertFalse(mock_log.warning.called)
# Allocate something reasonable for the above inventory
tb.allocate_from_provider(rp, 'DISK_GB', 500)
self.allocate_from_provider(rp, 'DISK_GB', 500)
# Update our inventory to over-subscribe us after the above allocation
disk_inv.total = 400
@ -735,7 +736,7 @@ class ResourceProviderTestCase(tb.PlacementDbBaseTestCase):
tb.add_inventory(rp, DISK_INVENTORY['resource_class'],
DISK_INVENTORY['total'])
expected_gen = rp.generation + 1
tb.allocate_from_provider(rp, DISK_ALLOCATION['resource_class'],
self.allocate_from_provider(rp, DISK_ALLOCATION['resource_class'],
DISK_ALLOCATION['used'])
self.assertEqual(expected_gen, rp.generation)
@ -788,7 +789,7 @@ class ResourceProviderListTestCase(tb.PlacementDbBaseTestCase):
# Create the VCPU allocation only for the first RP
if rp_i != '1':
continue
tb.allocate_from_provider(rp, fields.ResourceClass.VCPU, used=1)
self.allocate_from_provider(rp, fields.ResourceClass.VCPU, used=1)
# Both RPs should accept that request given the only current allocation
# for the first RP is leaving one VCPU
@ -1082,20 +1083,10 @@ class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase):
class TestAllocation(tb.PlacementDbBaseTestCase):
def test_create_list_and_delete_allocation(self):
resource_provider, disk_allocation = self._make_allocation(
DISK_INVENTORY, DISK_ALLOCATION)
self.assertEqual(DISK_ALLOCATION['resource_class'],
disk_allocation.resource_class)
self.assertEqual(resource_provider,
disk_allocation.resource_provider)
self.assertEqual(DISK_ALLOCATION['used'],
disk_allocation.used)
self.assertEqual(DISK_ALLOCATION['consumer_id'],
disk_allocation.consumer_id)
rp, _ = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION)
allocations = rp_obj.AllocationList.get_all_by_resource_provider(
self.ctx, resource_provider)
self.ctx, rp)
self.assertEqual(1, len(allocations))
@ -1105,7 +1096,7 @@ class TestAllocation(tb.PlacementDbBaseTestCase):
allocations.delete_all()
allocations = rp_obj.AllocationList.get_all_by_resource_provider(
self.ctx, resource_provider)
self.ctx, rp)
self.assertEqual(0, len(allocations))
@ -1131,32 +1122,38 @@ class TestAllocation(tb.PlacementDbBaseTestCase):
step_size=64,
allocation_ratio=1.5)
# Now create allocations that represent a move operation where the
# Create a consumer representing the instance
inst_consumer = consumer_obj.Consumer(
self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
project=self.project_obj)
inst_consumer.create()
# Now create an allocation that represents a move operation where the
# scheduler has selected cn_dest as the target host and created a
# "doubled-up" allocation for the duration of the move operation
alloc_list = rp_obj.AllocationList(context=self.ctx,
objects=[
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=cn_source,
resource_class=fields.ResourceClass.VCPU,
used=1),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=cn_source,
resource_class=fields.ResourceClass.MEMORY_MB,
used=256),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.VCPU,
used=1),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.MEMORY_MB,
used=256),
@ -1188,13 +1185,13 @@ class TestAllocation(tb.PlacementDbBaseTestCase):
objects=[
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.VCPU,
used=1),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=cn_dest,
resource_class=fields.ResourceClass.MEMORY_MB,
used=256),
@ -1239,6 +1236,16 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
consumer_uuid = uuidsentinel.consumer
consumer_uuid2 = uuidsentinel.consumer2
# Create a consumer representing the two instances
consumer = consumer_obj.Consumer(
self.ctx, uuid=consumer_uuid, user=self.user_obj,
project=self.project_obj)
consumer.create()
consumer2 = consumer_obj.Consumer(
self.ctx, uuid=consumer_uuid2, user=self.user_obj,
project=self.project_obj)
consumer2.create()
# Create one resource provider with 2 classes
rp1_name = uuidsentinel.rp1_name
rp1_uuid = uuidsentinel.rp1_uuid
@ -1254,11 +1261,11 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# create the allocations for a first consumer
allocation_1 = rp_obj.Allocation(resource_provider=rp1,
consumer_id=consumer_uuid,
consumer=consumer,
resource_class=rp1_class,
used=rp1_used)
allocation_2 = rp_obj.Allocation(resource_provider=rp1,
consumer_id=consumer_uuid,
consumer=consumer,
resource_class=rp2_class,
used=rp2_used)
allocation_list = rp_obj.AllocationList(
@ -1270,11 +1277,11 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# won't actually be doing real allocation math, which triggers
# the sql monster.
allocation_1 = rp_obj.Allocation(resource_provider=rp1,
consumer_id=consumer_uuid2,
consumer=consumer2,
resource_class=rp1_class,
used=rp1_used)
allocation_2 = rp_obj.Allocation(resource_provider=rp1,
consumer_id=consumer_uuid2,
consumer=consumer2,
resource_class=rp2_class,
used=rp2_used)
allocation_list = rp_obj.AllocationList(
@ -1286,6 +1293,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
max_unit = 10
consumer_uuid = uuidsentinel.consumer
# Create a consumer representing the instance
inst_consumer = consumer_obj.Consumer(
self.ctx, uuid=consumer_uuid, user=self.user_obj,
project=self.project_obj)
inst_consumer.create()
# Create two resource providers
rp1_name = uuidsentinel.rp1_name
rp1_uuid = uuidsentinel.rp1_uuid
@ -1302,11 +1315,11 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Two allocations, one for each resource provider.
allocation_1 = rp_obj.Allocation(resource_provider=rp1,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp1_class,
used=rp1_used)
allocation_2 = rp_obj.Allocation(resource_provider=rp2,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp2_class,
used=rp2_used)
allocation_list = rp_obj.AllocationList(
@ -1362,8 +1375,8 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# because a new allocataion is created, adding to the total
# used, not replacing.
rp1_used += 1
tb.allocate_from_provider(rp1, rp1_class, rp1_used,
consumer_id=consumer_uuid)
self.allocate_from_provider(rp1, rp1_class, rp1_used,
consumer=inst_consumer)
rp1_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid(
self.ctx, rp1_uuid)
@ -1406,10 +1419,10 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# allocation, bad step_size
self.assertRaises(exception.InvalidAllocationConstraintsViolated,
tb.allocate_from_provider, rp, rp_class, bad_used)
self.allocate_from_provider, rp, rp_class, bad_used)
# correct for step size
tb.allocate_from_provider(rp, rp_class, good_used)
self.allocate_from_provider(rp, rp_class, good_used)
# check usage
self._validate_usage(rp, good_used)
@ -1438,91 +1451,16 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
self._check_create_allocations(inventory_kwargs,
bad_used, good_used)
def test_create_all_with_project_user(self):
consumer_uuid = uuidsentinel.consumer
rp_class = fields.ResourceClass.DISK_GB
rp = self._make_rp_and_inventory(resource_class=rp_class,
max_unit=500)
allocation1 = rp_obj.Allocation(resource_provider=rp,
consumer_id=consumer_uuid,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=100)
allocation2 = rp_obj.Allocation(resource_provider=rp,
consumer_id=consumer_uuid,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=200)
allocation_list = rp_obj.AllocationList(
self.ctx,
objects=[allocation1, allocation2],
)
allocation_list.create_all()
# Verify that we have records in the consumers, projects, and users
# table for the information used in the above allocation creation
with self.api_db.get_engine().connect() as conn:
tbl = rp_obj._PROJECT_TBL
sel = sa.select([tbl.c.id]).where(
tbl.c.external_id == self.ctx.project_id,
)
res = conn.execute(sel).fetchall()
self.assertEqual(1, len(res), "project lookup not created.")
tbl = rp_obj._USER_TBL
sel = sa.select([tbl.c.id]).where(
tbl.c.external_id == self.ctx.user_id,
)
res = conn.execute(sel).fetchall()
self.assertEqual(1, len(res), "user lookup not created.")
tbl = rp_obj._CONSUMER_TBL
sel = sa.select([tbl.c.id]).where(
tbl.c.uuid == consumer_uuid,
)
res = conn.execute(sel).fetchall()
self.assertEqual(1, len(res), "consumer lookup not created.")
# Create allocation for a different user in the project
other_consumer_uuid = uuidsentinel.other_consumer
allocation3 = rp_obj.Allocation(resource_provider=rp,
consumer_id=other_consumer_uuid,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=uuidsentinel.other_user,
used=200)
allocation_list = rp_obj.AllocationList(
self.ctx,
objects=[allocation3],
)
allocation_list.create_all()
# Get usages back by project
usage_list = rp_obj.UsageList.get_all_by_project_user(
self.ctx, self.ctx.project_id)
self.assertEqual(1, len(usage_list))
self.assertEqual(500, usage_list[0].usage)
# Get usages back by project and user
usage_list = rp_obj.UsageList.get_all_by_project_user(
self.ctx, self.ctx.project_id,
user_id=uuidsentinel.other_user)
self.assertEqual(1, len(usage_list))
self.assertEqual(200, usage_list[0].usage)
# List allocations and confirm project and user
allocation_list = rp_obj.AllocationList.get_all_by_consumer_id(
self.ctx, other_consumer_uuid)
self.assertEqual(1, len(allocation_list))
allocation = allocation_list[0]
self.assertEqual(self.ctx.project_id, allocation.project_id)
self.assertEqual(uuidsentinel.other_user, allocation.user_id)
def test_create_and_clear(self):
"""Test that a used of 0 in an allocation wipes allocations."""
consumer_uuid = uuidsentinel.consumer
# Create a consumer representing the instance
inst_consumer = consumer_obj.Consumer(
self.ctx, uuid=consumer_uuid, user=self.user_obj,
project=self.project_obj)
inst_consumer.create()
rp_class = fields.ResourceClass.DISK_GB
target_rp = self._make_rp_and_inventory(resource_class=rp_class,
max_unit=500)
@ -1530,16 +1468,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Create two allocations with values and confirm the resulting
# usage is as expected.
allocation1 = rp_obj.Allocation(resource_provider=target_rp,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=100)
allocation2 = rp_obj.Allocation(resource_provider=target_rp,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=200)
allocation_list = rp_obj.AllocationList(
self.ctx,
@ -1556,16 +1490,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Create two allocations, one with 0 used, to confirm the
# resulting usage is only of one.
allocation1 = rp_obj.Allocation(resource_provider=target_rp,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=0)
allocation2 = rp_obj.Allocation(resource_provider=target_rp,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=200)
allocation_list = rp_obj.AllocationList(
self.ctx,
@ -1581,6 +1511,13 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# add a source rp and a migration consumer
migration_uuid = uuidsentinel.migration
# Create a consumer representing the migration
mig_consumer = consumer_obj.Consumer(
self.ctx, uuid=migration_uuid, user=self.user_obj,
project=self.project_obj)
mig_consumer.create()
source_rp = self._make_rp_and_inventory(
rp_name=uuidsentinel.source_name, rp_uuid=uuidsentinel.source_uuid,
resource_class=rp_class, max_unit=500)
@ -1588,16 +1525,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Create two allocations, one as the consumer, one as the
# migration.
allocation1 = rp_obj.Allocation(resource_provider=target_rp,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=200)
allocation2 = rp_obj.Allocation(resource_provider=source_rp,
consumer_id=migration_uuid,
consumer=mig_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=200)
allocation_list = rp_obj.AllocationList(
self.ctx,
@ -1621,16 +1554,12 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
# Clear the migration and confirm the target.
allocation1 = rp_obj.Allocation(resource_provider=target_rp,
consumer_id=consumer_uuid,
consumer=inst_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=200)
allocation2 = rp_obj.Allocation(resource_provider=source_rp,
consumer_id=migration_uuid,
consumer=mig_consumer,
resource_class=rp_class,
project_id=self.ctx.project_id,
user_id=self.ctx.user_id,
used=0)
allocation_list = rp_obj.AllocationList(
self.ctx,
@ -1667,49 +1596,61 @@ class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
max_unit=1024,
step_size=64)
# Create a consumer representing the instance
inst_consumer = consumer_obj.Consumer(
self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
project=self.project_obj)
inst_consumer.create()
# First create a allocation to consume full_rp's resource.
alloc_list = rp_obj.AllocationList(context=self.ctx,
objects=[
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance,
consumer=inst_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.MEMORY_MB,
used=1024)
])
alloc_list.create_all()
# Create a consumer representing the second instance
inst2_consumer = consumer_obj.Consumer(
self.ctx, uuid=uuidsentinel.instance2, user=self.user_obj,
project=self.project_obj)
inst2_consumer.create()
# Create an allocation list consisting of valid requests and an invalid
# request exceeding the memory full_rp can provide.
alloc_list = rp_obj.AllocationList(context=self.ctx,
objects=[
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance2,
consumer=inst2_consumer,
resource_provider=empty_rp,
resource_class=fields.ResourceClass.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance2,
consumer=inst2_consumer,
resource_provider=empty_rp,
resource_class=fields.ResourceClass.MEMORY_MB,
used=512),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance2,
consumer=inst2_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.VCPU,
used=12),
rp_obj.Allocation(
context=self.ctx,
consumer_id=uuidsentinel.instance2,
consumer=inst2_consumer,
resource_provider=full_rp,
resource_class=fields.ResourceClass.MEMORY_MB,
used=512),


@ -23,8 +23,9 @@ class UserTestCase(tb.PlacementDbBaseTestCase):
self.ctx, uuids.non_existing_user)
def test_create_and_get(self):
u = user_obj.User(self.ctx, external_id='fake-user')
u = user_obj.User(self.ctx, external_id='another-user')
u.create()
u = user_obj.User.get_by_external_id(self.ctx, 'fake-user')
self.assertEqual(1, u.id)
u = user_obj.User.get_by_external_id(self.ctx, 'another-user')
# User ID == 1 is fake-user created in setup
self.assertEqual(2, u.id)
self.assertRaises(exception.UserExists, u.create)


@ -18,7 +18,10 @@ from oslo_utils import uuidutils
from nova.api.openstack.placement import deploy
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.api.openstack.placement import policies
from nova import conf
from nova import config
@ -116,6 +119,7 @@ class APIFixture(fixture.GabbiFixture):
class AllocationFixture(APIFixture):
"""An APIFixture that has some pre-made Allocations."""
# TODO(jaypipes): Simplify and restructure this fixture
def start_fixture(self):
super(AllocationFixture, self).start_fixture()
self.context = context.get_admin_context()
@ -126,6 +130,13 @@ class AllocationFixture(APIFixture):
user_id = os.environ['USER_ID']
alt_user_id = os.environ['ALT_USER_ID']
user = user_obj.User(self.context, external_id=user_id)
user.create()
alt_user = user_obj.User(self.context, external_id=alt_user_id)
alt_user.create()
project = project_obj.Project(self.context, external_id=project_id)
project.create()
# Stealing from the super
rp_name = os.environ['RP_NAME']
rp_uuid = os.environ['RP_UUID']
@ -133,8 +144,13 @@ class AllocationFixture(APIFixture):
self.context, name=rp_name, uuid=rp_uuid)
rp.create()
# Create some DISK_GB inventory and allocations.
# Create a first consumer for the DISK_GB
consumer_id = uuidutils.generate_uuid()
consumer = consumer_obj.Consumer(
self.context, uuid=consumer_id, user=user, project=project)
consumer.create()
# Create some DISK_GB inventory and allocations.
inventory = rp_obj.Inventory(
self.context, resource_provider=rp,
resource_class='DISK_GB', total=2048,
@ -144,16 +160,12 @@ class AllocationFixture(APIFixture):
alloc1 = rp_obj.Allocation(
self.context, resource_provider=rp,
resource_class='DISK_GB',
consumer_id=consumer_id,
project_id=project_id,
user_id=user_id,
consumer=consumer,
used=500)
alloc2 = rp_obj.Allocation(
self.context, resource_provider=rp,
resource_class='DISK_GB',
consumer_id=consumer_id,
project_id=project_id,
user_id=user_id,
consumer=consumer,
used=500)
alloc_list = rp_obj.AllocationList(
self.context,
@ -161,9 +173,15 @@ class AllocationFixture(APIFixture):
)
alloc_list.create_all()
# Create some VCPU inventory and allocations.
# Create a second consumer for the VCPU
consumer_id = uuidutils.generate_uuid()
consumer = consumer_obj.Consumer(
self.context, uuid=consumer_id, user=user, project=project)
consumer.create()
# This consumer is referenced from the gabbits
os.environ['CONSUMER_ID'] = consumer_id
# Create some VCPU inventory and allocations.
inventory = rp_obj.Inventory(
self.context, resource_provider=rp,
resource_class='VCPU', total=10,
@ -173,37 +191,36 @@ class AllocationFixture(APIFixture):
alloc1 = rp_obj.Allocation(
self.context, resource_provider=rp,
resource_class='VCPU',
consumer_id=consumer_id,
project_id=project_id,
user_id=user_id,
consumer=consumer,
used=2)
alloc2 = rp_obj.Allocation(
self.context, resource_provider=rp,
resource_class='VCPU',
consumer_id=consumer_id,
project_id=project_id,
user_id=user_id,
consumer=consumer,
used=4)
alloc_list = rp_obj.AllocationList(
self.context,
objects=[alloc1, alloc2])
alloc_list.create_all()
# Create a consumer object for a different user
alt_consumer_id = uuidutils.generate_uuid()
alt_consumer = consumer_obj.Consumer(
self.context, uuid=alt_consumer_id, user=alt_user,
project=project)
alt_consumer.create()
os.environ['ALT_CONSUMER_ID'] = alt_consumer_id
# Create a couple of allocations for a different user.
consumer_id = uuidutils.generate_uuid()
alloc1 = rp_obj.Allocation(
self.context, resource_provider=rp,
resource_class='DISK_GB',
consumer_id=consumer_id,
project_id=project_id,
user_id=alt_user_id,
consumer=alt_consumer,
used=20)
alloc2 = rp_obj.Allocation(
self.context, resource_provider=rp,
resource_class='VCPU',
consumer_id=consumer_id,
project_id=project_id,
user_id=alt_user_id,
consumer=alt_consumer,
used=1)
alloc_list = rp_obj.AllocationList(
self.context,


@ -17,7 +17,10 @@ import testtools
from nova.api.openstack.placement import context
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import resource_provider
from nova.api.openstack.placement.objects import user as user_obj
from nova import rc_fields as fields
from nova.tests import uuidsentinel as uuids
@ -75,10 +78,14 @@ _ALLOCATION_DB = {
'id': _ALLOCATION_ID,
'resource_provider_id': _RESOURCE_PROVIDER_ID,
'resource_class_id': _RESOURCE_CLASS_ID,
'consumer_id': uuids.fake_instance,
'consumer_uuid': uuids.fake_instance,
'consumer_id': 1,
'consumer_generation': 0,
'used': 8,
'user_id': None,
'project_id': None,
'user_id': 1,
'user_external_id': uuids.user_id,
'project_id': 1,
'project_external_id': uuids.project_id,
}
@ -265,11 +272,18 @@ class TestAllocation(_TestCase):
rp = resource_provider.ResourceProvider(context=self.context,
uuid=_RESOURCE_PROVIDER_UUID,
name=_RESOURCE_PROVIDER_NAME)
self.project = project_obj.Project(
self.context, external_id='fake-project')
self.user = user_obj.User(
self.context, external_id='fake-user')
self.consumer = consumer_obj.Consumer(
self.context, uuid=uuids.fake_instance, project=self.project,
user=self.user)
obj = resource_provider.Allocation(context=self.context,
id=99,
resource_provider=rp,
resource_class=_RESOURCE_CLASS_NAME,
consumer_id=uuids.fake_instance,
consumer=self.consumer,
used=8)
alloc_list = resource_provider.AllocationList(self.context,
objects=[obj])
@ -278,6 +292,8 @@ class TestAllocation(_TestCase):
class TestAllocationListNoDB(_TestCase):
@mock.patch('nova.api.openstack.placement.objects.resource_provider.'
'_create_incomplete_consumers_for_provider')
@mock.patch('nova.api.openstack.placement.objects.resource_provider.'
'_ensure_rc_cache',
side_effect=_fake_ensure_cache)
@ -285,7 +301,7 @@ class TestAllocationListNoDB(_TestCase):
'_get_allocations_by_provider_id',
return_value=[_ALLOCATION_DB])
def test_get_allocations(self, mock_get_allocations_from_db,
mock_ensure_cache):
mock_ensure_cache, mock_create_consumers):
rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID,
uuid=uuids.resource_provider)
rp_alloc_list = resource_provider.AllocationList
@ -296,6 +312,8 @@ class TestAllocationListNoDB(_TestCase):
mock_get_allocations_from_db.assert_called_once_with(self.context,
rp.id)
self.assertEqual(_ALLOCATION_DB['used'], allocations[0].used)
mock_create_consumers.assert_called_once_with(
self.context, _RESOURCE_PROVIDER_ID)
class TestResourceClass(_TestCase):