Merge "begin refactor AllocCandidates._get_by_filters()"

This commit is contained in:
Zuul 2017-11-17 19:40:52 +00:00 committed by Gerrit Code Review
commit 59e4ecf7c3
3 changed files with 339 additions and 33 deletions

View File

@ -2424,6 +2424,100 @@ def _get_usages_by_provider_and_rc(ctx, rp_ids, rc_ids):
return ctx.session.execute(query).fetchall()
@db_api.api_context_manager.reader
def _get_provider_ids_matching_all(ctx, resources):
    """Returns a list of resource provider internal IDs that have available
    inventory to satisfy all the supplied requests for resources.

    :note: This function is used for scenarios that do NOT involve sharing
           providers. It also only looks at individual resource providers, not
           provider trees.

    :param ctx: Session context to use
    :param resources: A dict, keyed by resource class ID, of the amount
                      requested of that resource class.
    """
    rpt = sa.alias(_RP_TBL, name="rp")
    # Map of resource class internal ID to its lowercased string name, used
    # only to build readable alias names for the per-class tables below.
    rc_name_map = {
        rc_id: _RC_CACHE.string_from_id(rc_id).lower()
        for rc_id in resources.keys()
    }
    # Dict, keyed by resource class ID, of an aliased table object for the
    # inventories table winnowed to only that resource class.
    inv_tables = {
        rc_id: sa.alias(_INV_TBL, name='inv_%s' % rc_name_map[rc_id])
        for rc_id in resources.keys()
    }
    # Dict, keyed by resource class ID, of a derived table (subquery in the
    # FROM clause or JOIN) against the allocations table winnowed to only that
    # resource class, grouped by resource provider.
    usage_tables = {
        rc_id: sa.alias(
            sa.select([
                _ALLOC_TBL.c.resource_provider_id,
                sql.func.sum(_ALLOC_TBL.c.used).label('used'),
            ]).where(
                _ALLOC_TBL.c.resource_class_id == rc_id
            ).group_by(
                _ALLOC_TBL.c.resource_provider_id
            ),
            name='usage_%s' % rc_name_map[rc_id],
        )
        for rc_id in resources.keys()
    }
    sel = sa.select([rpt.c.id])
    # List of the WHERE conditions we build up by iterating over the requested
    # resources
    where_conds = []
    # The chain of joins that we eventually pass to select_from()
    join_chain = rpt
    for rc_id, amount in resources.items():
        inv_by_rc = inv_tables[rc_id]
        usage_by_rc = usage_tables[rc_id]
        # We can do a more efficient INNER JOIN because we don't have shared
        # resource providers to deal with
        rp_inv_join = sa.join(
            join_chain, inv_by_rc,
            sa.and_(
                inv_by_rc.c.resource_provider_id == rpt.c.id,
                # Add a join condition winnowing this copy of inventories table
                # to only the resource class being analyzed in this loop...
                inv_by_rc.c.resource_class_id == rc_id,
            ),
        )
        # LEFT OUTER JOIN to the usage subquery: a provider with no
        # allocations for this class has no usage row at all; the COALESCE
        # below treats that as zero used.
        rp_inv_usage_join = sa.outerjoin(
            rp_inv_join, usage_by_rc,
            inv_by_rc.c.resource_provider_id ==
            usage_by_rc.c.resource_provider_id,
        )
        join_chain = rp_inv_usage_join
        # Capacity condition: existing usage plus the requested amount must
        # fit within (total - reserved) * allocation_ratio, AND the requested
        # amount must respect the inventory's min_unit, max_unit and
        # step_size constraints.
        usage_cond = sa.and_(
            (
                (sql.func.coalesce(usage_by_rc.c.used, 0) + amount) <=
                (inv_by_rc.c.total - inv_by_rc.c.reserved) *
                inv_by_rc.c.allocation_ratio
            ),
            inv_by_rc.c.min_unit <= amount,
            inv_by_rc.c.max_unit >= amount,
            amount % inv_by_rc.c.step_size == 0,
        )
        where_conds.append(usage_cond)
    sel = sel.select_from(join_chain)
    sel = sel.where(sa.and_(*where_conds))
    return [r[0] for r in ctx.session.execute(sel)]
@base.NovaObjectRegistry.register_if(False)
class AllocationCandidates(base.NovaObject):
"""The AllocationCandidates object is a collection of possible allocations
@ -2484,27 +2578,57 @@ class AllocationCandidates(base.NovaObject):
for key, value in resources.items()
}
roots = [r[0] for r in _get_all_with_shared(context, resources)]
if not roots:
return [], []
# Contains a set of resource provider IDs for each resource class
# requested
# Contains a set of resource provider IDs that share some inventory for
# each resource class requested. We do this here as an optimization. If
# we have no sharing providers, the SQL to find matching providers for
# the requested resources is much simpler.
# TODO(jaypipes): Consider caching this for some amount of time since
# sharing providers generally don't change often and here we aren't
# concerned with how *much* inventory/capacity the sharing provider
# has, only that it is sharing *some* inventory of a particular
# resource class.
sharing_providers = {
rc_id: _get_providers_with_shared_capacity(context, rc_id, amount)
for rc_id, amount in resources.items()
}
have_sharing = any(sharing_providers.values())
if not have_sharing:
# We know there's no sharing providers, so we can more efficiently
# get a list of resource provider IDs that have ALL the requested
# resources.
# NOTE(jaypipes): When we start handling nested providers, we may
# add new code paths or modify this code path to return root
# provider IDs of provider trees instead of the resource provider
# IDs.
non_sharing_rp_ids = _get_provider_ids_matching_all(
context, resources)
else:
# We get the list of resource providers that either have the
# requested resources or are associated with the providers that
# share one or more of the requested resource(s)
non_sharing_rp_ids = [r[0] for r in _get_all_with_shared(
context, resources)]
# non_sharing_rp_ids contains a list of resource provider IDs that
# EITHER have all the requested resources themselves OR have some
# resources and are related to a provider that is sharing some
# resources with it. In other words, this is the list of resource
# provider IDs that are NOT sharing resources.
# We need to grab usage information for all the providers identified as
# potentially fulfilling part of the resource request. This includes
# "root providers" returned from _get_all_with_shared() as well as all
# the providers of shared resources. Here, we simply grab a unique set
# of all those resource provider internal IDs by set union'ing them
# together
all_rp_ids = set(roots)
all_rp_ids = set(non_sharing_rp_ids)
for rps in sharing_providers.values():
all_rp_ids |= set(rps)
# Short out if no providers have been found at this point.
if not all_rp_ids:
return [], []
# Grab usage summaries for each provider (local or sharing) and
# resource class requested
usages = _get_usages_by_provider_and_rc(
@ -2567,8 +2691,8 @@ class AllocationCandidates(base.NovaObject):
)
sharing_resource_requests[shared_rc_id].append(sharing_res_req)
for root_rp_id in roots:
if root_rp_id not in summaries:
for rp_id in non_sharing_rp_ids:
if rp_id not in summaries:
# This resource provider is not providing any resources that
# have been requested. This means that this resource provider
# has some requested resources shared *with* it but the
@ -2576,15 +2700,15 @@ class AllocationCandidates(base.NovaObject):
# it. Since this provider won't actually have an allocation
# request written for it, we just ignore it and continue
continue
root_summary = summaries[root_rp_id]
root_rp_uuid = root_summary['uuid']
rp_summary = summaries[rp_id]
rp_uuid = rp_summary['uuid']
local_resources = set(
rc_id for rc_id in resources.keys()
if rc_id in root_summary['resources']
if rc_id in rp_summary['resources']
)
shared_resources = set(
rc_id for rc_id in resources.keys()
if rc_id not in root_summary['resources']
if rc_id not in rp_summary['resources']
)
# Determine if the root provider actually has all the resources
# requested. If not, we need to add an AllocationRequest
@ -2596,7 +2720,7 @@ class AllocationCandidates(base.NovaObject):
context,
resource_provider=ResourceProvider(
context,
uuid=root_rp_uuid,
uuid=rp_uuid,
),
resource_class=_RC_CACHE.string_from_id(rc_id),
amount=amount,
@ -2633,7 +2757,7 @@ class AllocationCandidates(base.NovaObject):
context,
resource_provider=ResourceProvider(
context,
uuid=root_rp_uuid,
uuid=rp_uuid,
),
resource_class=_RC_CACHE.string_from_id(rc_id),
amount=amount,

View File

@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from nova.api.openstack.placement import lib as placement_lib
from nova import context
from nova import exception
@ -39,6 +41,17 @@ def _set_traits(rp, *traits):
rp.set_traits(rp_obj.TraitList(objects=tlist))
def _allocate_from_provider(rp, rc, used):
    """Create an allocation of ``used`` units of resource class ``rc``
    against resource provider ``rp``, under a freshly generated consumer
    UUID.
    """
    # NOTE(efried): Always use a random consumer UUID - we don't want to
    # override any existing allocations from the test case.
    rp_obj.AllocationList(
        rp._context, objects=[
            rp_obj.Allocation(
                rp._context, resource_provider=rp, resource_class=rc,
                consumer_id=uuidutils.generate_uuid(), used=used)]
    ).create_all()
def _provider_uuids_from_iterable(objs):
"""Return the set of resource_provider.uuid from an iterable.
@ -61,7 +74,174 @@ def _find_summary_for_resource(p_sum, rc_name):
return resource
class AllocationCandidatesTestCase(test.NoDBTestCase):
class ProviderDBHelperTestCase(test.NoDBTestCase):
USES_DB_SELF = True
    def setUp(self):
        super(ProviderDBHelperTestCase, self).setUp()
        # Both the main and the API databases are exercised by the resource
        # provider objects under test.
        self.useFixture(fixtures.Database())
        self.api_db = self.useFixture(fixtures.Database(database='api'))
        self.ctx = context.RequestContext('fake-user', 'fake-project')
        # Standard resource request reused by the test cases (including
        # subclasses that inherit this setUp).
        self.requested_resources = {
            fields.ResourceClass.VCPU: 1,
            fields.ResourceClass.MEMORY_MB: 64,
            fields.ResourceClass.DISK_GB: 1500,
        }
        # For debugging purposes, populated by _create_provider and used by
        # _validate_allocation_requests to make failure results more readable.
        self.rp_uuid_to_name = {}
def _create_provider(self, name, *aggs):
rp = rp_obj.ResourceProvider(self.ctx, name=name,
uuid=getattr(uuids, name))
rp.create()
if aggs:
rp.set_aggregates(aggs)
self.rp_uuid_to_name[rp.uuid] = name
return rp
    def test_get_provider_ids_matching_all(self):
        """Build a gallery of providers covering every exclusion reason and
        verify _get_provider_ids_matching_all() returns only the providers
        with available inventory in ALL the requested resource classes.
        """
        # These RPs are named based on whether we expect them to be 'incl'uded
        # or 'excl'uded in the result.

        # No inventory records. This one should never show up in a result.
        self._create_provider('no_inventory')

        # Inventory of adequate CPU and memory, no allocations against it,
        # but no DISK_GB inventory at all.
        excl_big_cm_noalloc = self._create_provider('big_cm_noalloc')
        _add_inventory(excl_big_cm_noalloc, fields.ResourceClass.VCPU, 15)
        _add_inventory(excl_big_cm_noalloc, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048)

        # Adequate inventory, no allocations against it.
        incl_biginv_noalloc = self._create_provider('biginv_noalloc')
        _add_inventory(incl_biginv_noalloc, fields.ResourceClass.VCPU, 15)
        _add_inventory(incl_biginv_noalloc, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048)
        _add_inventory(incl_biginv_noalloc, fields.ResourceClass.DISK_GB, 2000)

        # No allocations, but inventory unusable. Try to hit all the possible
        # reasons for exclusion.
        # VCPU min_unit too high
        excl_badinv_min_unit = self._create_provider('badinv_min_unit')
        _add_inventory(excl_badinv_min_unit, fields.ResourceClass.VCPU, 12,
                       min_unit=6)
        _add_inventory(excl_badinv_min_unit, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048)
        _add_inventory(excl_badinv_min_unit, fields.ResourceClass.DISK_GB,
                       2000)
        # MEMORY_MB max_unit too low
        excl_badinv_max_unit = self._create_provider('badinv_max_unit')
        _add_inventory(excl_badinv_max_unit, fields.ResourceClass.VCPU, 15)
        _add_inventory(excl_badinv_max_unit, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=512)
        _add_inventory(excl_badinv_max_unit, fields.ResourceClass.DISK_GB,
                       2000)
        # DISK_GB unsuitable step_size
        excl_badinv_step_size = self._create_provider('badinv_step_size')
        _add_inventory(excl_badinv_step_size, fields.ResourceClass.VCPU, 15)
        _add_inventory(excl_badinv_step_size, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048)
        _add_inventory(excl_badinv_step_size, fields.ResourceClass.DISK_GB,
                       2000, step_size=7)
        # Not enough total VCPU
        excl_badinv_total = self._create_provider('badinv_total')
        _add_inventory(excl_badinv_total, fields.ResourceClass.VCPU, 4)
        _add_inventory(excl_badinv_total, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048)
        _add_inventory(excl_badinv_total, fields.ResourceClass.DISK_GB, 2000)
        # Too much reserved MEMORY_MB
        excl_badinv_reserved = self._create_provider('badinv_reserved')
        _add_inventory(excl_badinv_reserved, fields.ResourceClass.VCPU, 15)
        _add_inventory(excl_badinv_reserved, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048, reserved=3500)
        _add_inventory(excl_badinv_reserved, fields.ResourceClass.DISK_GB,
                       2000)
        # DISK_GB allocation ratio blows it up
        excl_badinv_alloc_ratio = self._create_provider('badinv_alloc_ratio')
        _add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.VCPU, 15)
        _add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.MEMORY_MB,
                       4096, max_unit=2048)
        _add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.DISK_GB,
                       2000, allocation_ratio=0.5)

        # Inventory consumed in one RC, but available in the others
        excl_1invunavail = self._create_provider('1invunavail')
        _add_inventory(excl_1invunavail, fields.ResourceClass.VCPU, 10)
        _allocate_from_provider(excl_1invunavail, fields.ResourceClass.VCPU, 7)
        _add_inventory(excl_1invunavail, fields.ResourceClass.MEMORY_MB, 4096)
        _allocate_from_provider(excl_1invunavail,
                                fields.ResourceClass.MEMORY_MB, 1024)
        _add_inventory(excl_1invunavail, fields.ResourceClass.DISK_GB, 2000)
        _allocate_from_provider(excl_1invunavail,
                                fields.ResourceClass.DISK_GB, 400)

        # Inventory all consumed
        excl_allused = self._create_provider('allused')
        _add_inventory(excl_allused, fields.ResourceClass.VCPU, 10)
        _allocate_from_provider(excl_allused, fields.ResourceClass.VCPU, 7)
        _add_inventory(excl_allused, fields.ResourceClass.MEMORY_MB, 4000)
        _allocate_from_provider(excl_allused,
                                fields.ResourceClass.MEMORY_MB, 1500)
        _allocate_from_provider(excl_allused,
                                fields.ResourceClass.MEMORY_MB, 2000)
        _add_inventory(excl_allused, fields.ResourceClass.DISK_GB, 1500)
        _allocate_from_provider(excl_allused, fields.ResourceClass.DISK_GB, 1)

        # Inventory available in requested classes, but unavailable in others
        incl_extra_full = self._create_provider('extra_full')
        _add_inventory(incl_extra_full, fields.ResourceClass.VCPU, 20)
        _allocate_from_provider(incl_extra_full, fields.ResourceClass.VCPU, 15)
        _add_inventory(incl_extra_full, fields.ResourceClass.MEMORY_MB, 4096)
        _allocate_from_provider(incl_extra_full,
                                fields.ResourceClass.MEMORY_MB, 1024)
        _add_inventory(incl_extra_full, fields.ResourceClass.DISK_GB, 2000)
        _allocate_from_provider(incl_extra_full, fields.ResourceClass.DISK_GB,
                                400)
        _add_inventory(incl_extra_full, fields.ResourceClass.PCI_DEVICE, 4)
        _allocate_from_provider(incl_extra_full,
                                fields.ResourceClass.PCI_DEVICE, 1)
        _allocate_from_provider(incl_extra_full,
                                fields.ResourceClass.PCI_DEVICE, 3)

        # Inventory available in unrequested classes, not in requested ones
        excl_extra_avail = self._create_provider('extra_avail')
        # Incompatible step size
        _add_inventory(excl_extra_avail, fields.ResourceClass.VCPU, 10,
                       step_size=3)
        # Not enough left after reserved + used
        _add_inventory(excl_extra_avail, fields.ResourceClass.MEMORY_MB, 4096,
                       max_unit=2048, reserved=2048)
        _allocate_from_provider(excl_extra_avail,
                                fields.ResourceClass.MEMORY_MB, 1040)
        # Allocation ratio math
        _add_inventory(excl_extra_avail, fields.ResourceClass.DISK_GB, 2000,
                       allocation_ratio=0.5)
        _add_inventory(excl_extra_avail, fields.ResourceClass.IPV4_ADDRESS, 48)
        custom_special = rp_obj.ResourceClass(self.ctx, name='CUSTOM_SPECIAL')
        custom_special.create()
        _add_inventory(excl_extra_avail, 'CUSTOM_SPECIAL', 100)
        _allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99)

        # The helper under test takes resource class internal IDs as keys;
        # for standard classes the ID is the class's index position in
        # ResourceClass.STANDARD.
        resources = {
            fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 5,
            fields.ResourceClass.STANDARD.index(
                fields.ResourceClass.MEMORY_MB): 1024,
            fields.ResourceClass.STANDARD.index(
                fields.ResourceClass.DISK_GB): 1500
        }

        # Run it!
        res = rp_obj._get_provider_ids_matching_all(self.ctx, resources)

        # We should get all the incl_* RPs
        expected = [incl_biginv_noalloc, incl_extra_full]
        self.assertEqual(set(rp.id for rp in expected), set(res))
class AllocationCandidatesTestCase(ProviderDBHelperTestCase):
"""Tests a variety of scenarios with both shared and non-shared resource
providers that the AllocationCandidates.get_by_requests() method returns a
set of alternative allocation requests and provider summaries that may be
@ -69,13 +249,8 @@ class AllocationCandidatesTestCase(test.NoDBTestCase):
resources against providers.
"""
USES_DB_SELF = True
def setUp(self):
super(AllocationCandidatesTestCase, self).setUp()
self.useFixture(fixtures.Database())
self.api_db = self.useFixture(fixtures.Database(database='api'))
self.ctx = context.RequestContext('fake-user', 'fake-project')
self.requested_resources = {
fields.ResourceClass.VCPU: 1,
fields.ResourceClass.MEMORY_MB: 64,
@ -92,15 +267,6 @@ class AllocationCandidatesTestCase(test.NoDBTestCase):
resources=self.requested_resources)]
return rp_obj.AllocationCandidates.get_by_requests(self.ctx, requests)
def _create_provider(self, name, *aggs):
rp = rp_obj.ResourceProvider(self.ctx, name=name,
uuid=getattr(uuids, name))
rp.create()
if aggs:
rp.set_aggregates(aggs)
self.rp_uuid_to_name[rp.uuid] = name
return rp
def _validate_allocation_requests(self, expected, candidates):
"""Assert correctness of allocation requests in allocation candidates.

View File

@ -2100,7 +2100,9 @@ class SharedProviderTestCase(ResourceProviderBaseCase):
)
cn2.create()
# Populate the two compute node providers with inventory, sans DISK_GB
# Populate the two compute node providers with inventory. One has
# DISK_GB. Both should be excluded from the result (one doesn't have
# the requested resource; but neither is a sharing provider).
for cn in (cn1, cn2):
vcpu = rp_obj.Inventory(
resource_provider=cn,
@ -2122,7 +2124,21 @@ class SharedProviderTestCase(ResourceProviderBaseCase):
step_size=64,
allocation_ratio=1.5,
)
inv_list = rp_obj.InventoryList(objects=[vcpu, memory_mb])
if cn is cn1:
disk_gb = rp_obj.Inventory(
resource_provider=cn,
resource_class=fields.ResourceClass.DISK_GB,
total=2000,
reserved=0,
min_unit=10,
max_unit=100,
step_size=10,
allocation_ratio=1.0,
)
inv_list = rp_obj.InventoryList(objects=[vcpu, memory_mb,
disk_gb])
else:
inv_list = rp_obj.InventoryList(objects=[vcpu, memory_mb])
cn.set_inventory(inv_list)
# Create the shared storage pool