# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import itertools
import random
# NOTE(cdent): The resource provider objects are designed to never be
# used over RPC. Remote manipulation is done with the placement HTTP
# API. The 'remotable' decorators should not be used, the objects should
# not be registered and there is no need to express VERSIONs nor handle
# obj_make_compatible.
import os_traits
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import six
import sqlalchemy as sa
from sqlalchemy import exc as sqla_exc
from sqlalchemy import func
from sqlalchemy import sql
from sqlalchemy.sql import null
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.api.openstack.placement import resource_class_cache as rc_cache
from nova.db.sqlalchemy import api_models as models
from nova.i18n import _
from nova import rc_fields
_TRAIT_TBL = models.Trait.__table__
_ALLOC_TBL = models.Allocation.__table__
_INV_TBL = models.Inventory.__table__
_RP_TBL = models.ResourceProvider.__table__
# Not used in this file but used in tests.
_RC_TBL = models.ResourceClass.__table__
_AGG_TBL = models.PlacementAggregate.__table__
_RP_AGG_TBL = models.ResourceProviderAggregate.__table__
_RP_TRAIT_TBL = models.ResourceProviderTrait.__table__
_PROJECT_TBL = models.Project.__table__
_USER_TBL = models.User.__table__
_CONSUMER_TBL = models.Consumer.__table__
_RC_CACHE = None
_TRAIT_LOCK = 'trait_sync'
_TRAITS_SYNCED = False
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@db_api.placement_context_manager.reader
def ensure_rc_cache(ctx):
"""Ensures that a singleton resource class cache has been created in the
module's scope.
:param ctx: `nova.context.RequestContext` that may be used to grab a DB
connection.
"""
global _RC_CACHE
if _RC_CACHE is not None:
return
_RC_CACHE = rc_cache.ResourceClassCache(ctx)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
# Bug #1760322: If the caller raises an exception, we don't want the trait
# sync rolled back, so we use an .independent transaction
@db_api.placement_context_manager.writer.independent
def _trait_sync(ctx):
"""Sync the os_traits symbols to the database.
    Reads all symbols from the os_traits library, checks which of them are
    not yet present in the database, and bulk-inserts the missing ones. This
    is done at most once per process, triggered the first time either
    Trait.get_by_name or TraitList.get_all is called.
:param ctx: `nova.context.RequestContext` that may be used to grab a DB
connection.
"""
# Create a set of all traits in the os_traits library.
std_traits = set(os_traits.get_traits())
sel = sa.select([_TRAIT_TBL.c.name])
res = ctx.session.execute(sel).fetchall()
# Create a set of all traits in the db that are not custom
# traits.
db_traits = set(
r[0] for r in res
if not os_traits.is_custom(r[0])
)
# Determine those traits which are in os_traits but not
# currently in the database, and insert them.
need_sync = std_traits - db_traits
ins = _TRAIT_TBL.insert()
batch_args = [
{'name': six.text_type(trait)}
for trait in need_sync
]
if batch_args:
try:
ctx.session.execute(ins, batch_args)
LOG.info("Synced traits from os_traits into API DB: %s",
need_sync)
except db_exc.DBDuplicateEntry:
pass # some other process sync'd, just ignore
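# An illustrative walk-through of the set arithmetic in _trait_sync (the
# trait names below are real os_traits symbols; the DB contents are made up):
#
#     std_traits = {'HW_CPU_X86_AVX', 'HW_CPU_X86_SSE'}
#     db_traits = {'HW_CPU_X86_SSE'}        # CUSTOM_* rows were filtered out
#     need_sync = std_traits - db_traits    # {'HW_CPU_X86_AVX'} is inserted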
def ensure_trait_sync(ctx):
"""Ensures that the os_traits library is synchronized to the traits db.
If _TRAITS_SYNCED is False then this process has not tried to update the
traits db. Do so by calling _trait_sync. Since the placement API server
could be multi-threaded, lock around testing _TRAITS_SYNCED to avoid
duplicating work.
Different placement API server processes that talk to the same database
will avoid issues through the power of transactions.
:param ctx: `nova.context.RequestContext` that may be used to grab a DB
connection.
"""
global _TRAITS_SYNCED
# If another thread is doing this work, wait for it to complete.
# When that thread is done _TRAITS_SYNCED will be true in this
# thread and we'll simply return.
with lockutils.lock(_TRAIT_LOCK):
if not _TRAITS_SYNCED:
_trait_sync(ctx)
_TRAITS_SYNCED = True
def _get_current_inventory_resources(ctx, rp):
"""Returns a set() containing the resource class IDs for all resources
currently having an inventory record for the supplied resource provider.
:param ctx: `nova.context.RequestContext` that may be used to grab a DB
connection.
:param rp: Resource provider to query inventory for.
"""
cur_res_sel = sa.select([_INV_TBL.c.resource_class_id]).where(
_INV_TBL.c.resource_provider_id == rp.id)
existing_resources = ctx.session.execute(cur_res_sel).fetchall()
return set([r[0] for r in existing_resources])
def _delete_inventory_from_provider(ctx, rp, to_delete):
"""Deletes any inventory records from the supplied provider and set() of
resource class identifiers.
If there are allocations for any of the inventories to be deleted raise
InventoryInUse exception.
:param ctx: `nova.context.RequestContext` that contains an oslo_db Session
:param rp: Resource provider from which to delete inventory.
:param to_delete: set() containing resource class IDs for records to
delete.
"""
allocation_query = sa.select(
[_ALLOC_TBL.c.resource_class_id.label('resource_class')]).where(
sa.and_(_ALLOC_TBL.c.resource_provider_id == rp.id,
_ALLOC_TBL.c.resource_class_id.in_(to_delete))
).group_by(_ALLOC_TBL.c.resource_class_id)
allocations = ctx.session.execute(allocation_query).fetchall()
if allocations:
resource_classes = ', '.join([_RC_CACHE.string_from_id(alloc[0])
for alloc in allocations])
raise exception.InventoryInUse(resource_classes=resource_classes,
resource_provider=rp.uuid)
del_stmt = _INV_TBL.delete().where(sa.and_(
_INV_TBL.c.resource_provider_id == rp.id,
_INV_TBL.c.resource_class_id.in_(to_delete)))
res = ctx.session.execute(del_stmt)
return res.rowcount
def _add_inventory_to_provider(ctx, rp, inv_list, to_add):
"""Inserts new inventory records for the supplied resource provider.
:param ctx: `nova.context.RequestContext` that contains an oslo_db Session
:param rp: Resource provider to add inventory to.
:param inv_list: InventoryList object
:param to_add: set() containing resource class IDs to search inv_list for
adding to resource provider.
"""
for rc_id in to_add:
rc_str = _RC_CACHE.string_from_id(rc_id)
inv_record = inv_list.find(rc_str)
ins_stmt = _INV_TBL.insert().values(
resource_provider_id=rp.id,
resource_class_id=rc_id,
total=inv_record.total,
reserved=inv_record.reserved,
min_unit=inv_record.min_unit,
max_unit=inv_record.max_unit,
step_size=inv_record.step_size,
allocation_ratio=inv_record.allocation_ratio)
ctx.session.execute(ins_stmt)
def _update_inventory_for_provider(ctx, rp, inv_list, to_update):
"""Updates existing inventory records for the supplied resource provider.
:param ctx: `nova.context.RequestContext` that contains an oslo_db Session
:param rp: Resource provider on which to update inventory.
:param inv_list: InventoryList object
:param to_update: set() containing resource class IDs to search inv_list
for updating in resource provider.
:returns: A list of (uuid, class) tuples that have exceeded their
capacity after this inventory update.
"""
exceeded = []
for rc_id in to_update:
rc_str = _RC_CACHE.string_from_id(rc_id)
inv_record = inv_list.find(rc_str)
allocation_query = sa.select(
[func.sum(_ALLOC_TBL.c.used).label('usage')]).\
where(sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp.id,
_ALLOC_TBL.c.resource_class_id == rc_id))
allocations = ctx.session.execute(allocation_query).first()
if (allocations
and allocations['usage'] is not None
and allocations['usage'] > inv_record.capacity):
exceeded.append((rp.uuid, rc_str))
upd_stmt = _INV_TBL.update().where(sa.and_(
_INV_TBL.c.resource_provider_id == rp.id,
_INV_TBL.c.resource_class_id == rc_id)).values(
total=inv_record.total,
reserved=inv_record.reserved,
min_unit=inv_record.min_unit,
max_unit=inv_record.max_unit,
step_size=inv_record.step_size,
allocation_ratio=inv_record.allocation_ratio)
res = ctx.session.execute(upd_stmt)
if not res.rowcount:
raise exception.InventoryWithResourceClassNotFound(
resource_class=rc_str)
return exceeded
def _increment_provider_generation(ctx, rp):
"""Increments the supplied provider's generation value, supplying the
currently-known generation. Returns whether the increment succeeded.
:param ctx: `nova.context.RequestContext` that contains an oslo_db Session
:param rp: `ResourceProvider` whose generation should be updated.
:returns: The new resource provider generation value if successful.
    :raises exception.ResourceProviderConcurrentUpdateDetected: if another
            thread updated the same resource provider's view of its inventory
            or allocations in between the time when this object was originally
            read and the call that attempted the update.
"""
rp_gen = rp.generation
new_generation = rp_gen + 1
upd_stmt = _RP_TBL.update().where(sa.and_(
_RP_TBL.c.id == rp.id,
_RP_TBL.c.generation == rp_gen)).values(
generation=(new_generation))
res = ctx.session.execute(upd_stmt)
if res.rowcount != 1:
raise exception.ResourceProviderConcurrentUpdateDetected()
return new_generation
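# The generation bump above is an optimistic "compare-and-swap" expressed in
# SQL. Conceptually (illustrative SQL, not the exact emitted statement):
#
#     UPDATE resource_providers
#     SET generation = :known_generation + 1
#     WHERE id = :rp_id AND generation = :known_generation;
#
# If a concurrent writer already bumped the generation, the WHERE clause
# matches zero rows and ResourceProviderConcurrentUpdateDetected is raised.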
@db_api.placement_context_manager.writer
def _add_inventory(context, rp, inventory):
"""Add one Inventory that wasn't already on the provider.
:raises `exception.ResourceClassNotFound` if inventory.resource_class
cannot be found in either the standard classes or the DB.
"""
rc_id = _RC_CACHE.id_from_string(inventory.resource_class)
inv_list = InventoryList(objects=[inventory])
_add_inventory_to_provider(
context, rp, inv_list, set([rc_id]))
rp.generation = _increment_provider_generation(context, rp)
@db_api.placement_context_manager.writer
def _update_inventory(context, rp, inventory):
"""Update an inventory already on the provider.
:raises `exception.ResourceClassNotFound` if inventory.resource_class
cannot be found in either the standard classes or the DB.
"""
rc_id = _RC_CACHE.id_from_string(inventory.resource_class)
inv_list = InventoryList(objects=[inventory])
exceeded = _update_inventory_for_provider(
context, rp, inv_list, set([rc_id]))
rp.generation = _increment_provider_generation(context, rp)
return exceeded
@db_api.placement_context_manager.writer
def _delete_inventory(context, rp, resource_class):
"""Delete up to one Inventory of the given resource_class string.
:raises `exception.ResourceClassNotFound` if resource_class
cannot be found in either the standard classes or the DB.
"""
rc_id = _RC_CACHE.id_from_string(resource_class)
if not _delete_inventory_from_provider(context, rp, [rc_id]):
raise exception.NotFound(
'No inventory of class %s found for delete'
% resource_class)
rp.generation = _increment_provider_generation(context, rp)
@db_api.placement_context_manager.writer
def _set_inventory(context, rp, inv_list):
"""Given an InventoryList object, replaces the inventory of the
resource provider in a safe, atomic fashion using the resource
provider's generation as a consistent view marker.
:param context: Nova RequestContext.
:param rp: `ResourceProvider` object upon which to set inventory.
:param inv_list: `InventoryList` object to save to backend storage.
:returns: A list of (uuid, class) tuples that have exceeded their
capacity after this inventory update.
    :raises exception.ResourceProviderConcurrentUpdateDetected: if another
            thread updated the same resource provider's view of its inventory
            or allocations in between the time when this object was originally
            read and the call to set the inventory.
:raises `exception.ResourceClassNotFound` if any resource class in any
inventory in inv_list cannot be found in either the standard
classes or the DB.
:raises `exception.InventoryInUse` if we attempt to delete inventory
from a provider that has allocations for that resource class.
"""
existing_resources = _get_current_inventory_resources(context, rp)
these_resources = set([_RC_CACHE.id_from_string(r.resource_class)
for r in inv_list.objects])
# Determine which resources we should be adding, deleting and/or
# updating in the resource provider's inventory by comparing sets
# of resource class identifiers.
to_add = these_resources - existing_resources
to_delete = existing_resources - these_resources
to_update = these_resources & existing_resources
exceeded = []
if to_delete:
_delete_inventory_from_provider(context, rp, to_delete)
if to_add:
_add_inventory_to_provider(context, rp, inv_list, to_add)
if to_update:
exceeded = _update_inventory_for_provider(context, rp, inv_list,
to_update)
    # Here is where we update the resource provider's generation value. If
    # this UPDATE affects zero rows, another thread has updated the
    # inventory for this resource provider between the time the caller
    # originally read the resource provider record and inventory information
    # and this point. We raise an exception here, which rolls back the above
    # transaction and returns an error to the caller, indicating that they
    # can retry the inventory save after re-verifying any capacity
    # conditions and re-reading the existing inventory information.
rp.generation = _increment_provider_generation(context, rp)
return exceeded
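# An illustrative example of the add/delete/update set arithmetic in
# _set_inventory, using made-up resource class IDs (1=VCPU, 2=DISK_GB,
# 3=MEMORY_MB):
#
#     existing_resources = {1, 2}    # provider currently has VCPU, DISK_GB
#     these_resources = {1, 3}       # caller wants VCPU, MEMORY_MB
#     to_add = these_resources - existing_resources     # {3}: insert
#     to_delete = existing_resources - these_resources  # {2}: delete
#     to_update = these_resources & existing_resources  # {1}: update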
@db_api.placement_context_manager.reader
def _get_provider_by_uuid(context, uuid):
"""Given a UUID, return a dict of information about the resource provider
from the database.
:raises: NotFound if no such provider was found
:param uuid: The UUID to look up
"""
rpt = sa.alias(_RP_TBL, name="rp")
parent = sa.alias(_RP_TBL, name="parent")
root = sa.alias(_RP_TBL, name="root")
# TODO(jaypipes): Change this to an inner join when we are sure all
# root_provider_id values are NOT NULL
rp_to_root = sa.outerjoin(rpt, root, rpt.c.root_provider_id == root.c.id)
rp_to_parent = sa.outerjoin(rp_to_root, parent,
rpt.c.parent_provider_id == parent.c.id)
cols = [
rpt.c.id,
rpt.c.uuid,
rpt.c.name,
rpt.c.generation,
root.c.uuid.label("root_provider_uuid"),
parent.c.uuid.label("parent_provider_uuid"),
rpt.c.updated_at,
rpt.c.created_at,
]
sel = sa.select(cols).select_from(rp_to_parent).where(rpt.c.uuid == uuid)
res = context.session.execute(sel).fetchone()
if not res:
raise exception.NotFound(
'No resource provider with uuid %s found' % uuid)
return dict(res)
@db_api.placement_context_manager.reader
def _get_aggregates_by_provider_id(context, rp_id):
join_statement = sa.join(
_AGG_TBL, _RP_AGG_TBL, sa.and_(
_AGG_TBL.c.id == _RP_AGG_TBL.c.aggregate_id,
_RP_AGG_TBL.c.resource_provider_id == rp_id))
sel = sa.select([_AGG_TBL.c.uuid]).select_from(join_statement)
return [r[0] for r in context.session.execute(sel).fetchall()]
@db_api.placement_context_manager.reader
def _anchors_for_sharing_providers(context, rp_ids, get_id=False):
"""Given a list of internal IDs of sharing providers, returns a set of
tuples of (sharing provider UUID, anchor provider UUID), where each of
anchor is the unique root provider of a tree associated with the same
aggregate as the sharing provider. (These are the providers that can
"anchor" a single AllocationRequest.)
The sharing provider may or may not itself be part of a tree; in either
case, an entry for this root provider is included in the result.
If the sharing provider is not part of any aggregate, the empty list is
returned.
If get_id is True, it returns a set of tuples of (sharing provider ID,
anchor provider ID) instead.
"""
# SELECT sps.uuid, COALESCE(rps.uuid, shr_with_sps.uuid)
# FROM resource_providers AS sps
# INNER JOIN resource_provider_aggregates AS shr_aggs
# ON sps.id = shr_aggs.resource_provider_id
# INNER JOIN resource_provider_aggregates AS shr_with_sps_aggs
# ON shr_aggs.aggregate_id = shr_with_sps_aggs.aggregate_id
# INNER JOIN resource_providers AS shr_with_sps
# ON shr_with_sps_aggs.resource_provider_id = shr_with_sps.id
# LEFT JOIN resource_providers AS rps
# ON shr_with_sps.root_provider_id = rps.id
# WHERE sps.id IN $(RP_IDs)
rps = sa.alias(_RP_TBL, name='rps')
sps = sa.alias(_RP_TBL, name='sps')
shr_aggs = sa.alias(_RP_AGG_TBL, name='shr_aggs')
shr_with_sps_aggs = sa.alias(_RP_AGG_TBL, name='shr_with_sps_aggs')
shr_with_sps = sa.alias(_RP_TBL, name='shr_with_sps')
join_chain = sa.join(
sps, shr_aggs, sps.c.id == shr_aggs.c.resource_provider_id)
join_chain = sa.join(
join_chain, shr_with_sps_aggs,
shr_aggs.c.aggregate_id == shr_with_sps_aggs.c.aggregate_id)
join_chain = sa.join(
join_chain, shr_with_sps,
shr_with_sps_aggs.c.resource_provider_id == shr_with_sps.c.id)
if get_id:
# TODO(yikun): Change `func.coalesce(shr_with_sps.c.root_provider_id,
# shr_with_sps.c.id)` to `shr_with_sps.c.root_provider_id` when we are
# sure all root_provider_id values are NOT NULL
sel = sa.select([sps.c.id, func.coalesce(
shr_with_sps.c.root_provider_id, shr_with_sps.c.id)])
else:
# TODO(efried): Change this to an inner join and change
# 'func.coalesce(rps.c.uuid, shr_with_sps.c.uuid)' to `rps.c.uuid`
# when we are sure all root_provider_id values are NOT NULL
join_chain = sa.outerjoin(
join_chain, rps, shr_with_sps.c.root_provider_id == rps.c.id)
sel = sa.select([sps.c.uuid, func.coalesce(rps.c.uuid,
shr_with_sps.c.uuid)])
sel = sel.select_from(join_chain)
sel = sel.where(sps.c.id.in_(rp_ids))
return set([(r[0], r[1]) for r in context.session.execute(sel).fetchall()])
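# An illustrative result shape (IDs and UUIDs made up): if sharing provider
# SS is itself a root and shares aggregate AGG_1 with trees rooted at R1 and
# R2, then:
#
#     _anchors_for_sharing_providers(ctx, [ss_id])
#     # => {('ss_uuid', 'r1_uuid'), ('ss_uuid', 'r2_uuid'),
#     #     ('ss_uuid', 'ss_uuid')}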
@db_api.placement_context_manager.writer
def _set_aggregates(context, resource_provider, provided_aggregates,
increment_generation=False):
rp_id = resource_provider.id
    # When aggregate uuids are persisted, no validation is done to ensure
    # that they refer to something that has meaning elsewhere. It is
    # assumed that code which later makes use of the aggregates will
    # validate their fitness.
# TODO(cdent): At the moment we do not delete
# a PlacementAggregate that no longer has any associations
# with at least one resource provider. We may wish to do that
# to avoid bloat if it turns out we're creating a lot of noise.
# Not doing now to move things along.
provided_aggregates = set(provided_aggregates)
existing_aggregates = set(_get_aggregates_by_provider_id(context, rp_id))
to_add = provided_aggregates - existing_aggregates
target_aggregates = list(provided_aggregates)
# Create any aggregates that do not yet exist in
# PlacementAggregates. This is different from
# the set in existing_aggregates; those are aggregates for
# which there are associations for the resource provider
# at rp_id. The following loop checks for the existence of any
# aggregate with the provided uuid. In this way we only
# create a new row in the PlacementAggregate table if the
# aggregate uuid has never been seen before. Code further
# below will update the associations.
for agg_uuid in to_add:
found_agg = context.session.query(models.PlacementAggregate.uuid).\
filter_by(uuid=agg_uuid).first()
if not found_agg:
new_aggregate = models.PlacementAggregate(uuid=agg_uuid)
try:
context.session.add(new_aggregate)
# Flush each aggregate to explicitly call the INSERT
# statement that could result in an integrity error
# if some other thread has added this agg_uuid. This
# also makes sure that the new aggregates have
# ids when the SELECT below happens.
context.session.flush()
except db_exc.DBDuplicateEntry:
# Something else has already added this agg_uuid
pass
    # Remove all aggregate associations so we can refresh them below. This
    # means that the associations are always rewritten, while the
    # aggregates themselves stay around.
context.session.query(models.ResourceProviderAggregate).filter_by(
resource_provider_id=rp_id).delete()
    # Insert (resource_provider_id, aggregate_id) pairs into the
    # ResourceProviderAggregate table.
if target_aggregates:
select_agg_id = sa.select([rp_id, models.PlacementAggregate.id]).\
where(models.PlacementAggregate.uuid.in_(target_aggregates))
insert_aggregates = models.ResourceProviderAggregate.__table__.\
insert().from_select(['resource_provider_id', 'aggregate_id'],
select_agg_id)
context.session.execute(insert_aggregates)
if increment_generation:
resource_provider.generation = _increment_provider_generation(
context, resource_provider)
@db_api.placement_context_manager.reader
def _get_traits_by_provider_id(context, rp_id):
t = sa.alias(_TRAIT_TBL, name='t')
rpt = sa.alias(_RP_TRAIT_TBL, name='rpt')
join_cond = sa.and_(t.c.id == rpt.c.trait_id,
rpt.c.resource_provider_id == rp_id)
join = sa.join(t, rpt, join_cond)
sel = sa.select([t.c.id, t.c.name,
t.c.created_at, t.c.updated_at]).select_from(join)
return [dict(r) for r in context.session.execute(sel).fetchall()]
def _add_traits_to_provider(ctx, rp_id, to_add):
"""Adds trait associations to the provider with the supplied ID.
:param ctx: `nova.context.RequestContext` that has an oslo_db Session
:param rp_id: Internal ID of the resource provider on which to add
trait associations
:param to_add: set() containing internal trait IDs for traits to add
"""
for trait_id in to_add:
try:
ins_stmt = _RP_TRAIT_TBL.insert().values(
resource_provider_id=rp_id,
trait_id=trait_id)
ctx.session.execute(ins_stmt)
except db_exc.DBDuplicateEntry:
            # Another thread already set this trait for this provider.
            # Ignore it; ResourceProviderConcurrentUpdateDetected will
            # almost certainly be raised later when we increment the
            # provider's generation, and that is fine.
            pass
def _delete_traits_from_provider(ctx, rp_id, to_delete):
"""Deletes trait associations from the provider with the supplied ID and
set() of internal trait IDs.
:param ctx: `nova.context.RequestContext` that has an oslo_db Session
:param rp_id: Internal ID of the resource provider from which to delete
trait associations
:param to_delete: set() containing internal trait IDs for traits to
delete
"""
del_stmt = _RP_TRAIT_TBL.delete().where(
sa.and_(
_RP_TRAIT_TBL.c.resource_provider_id == rp_id,
_RP_TRAIT_TBL.c.trait_id.in_(to_delete)))
ctx.session.execute(del_stmt)
@db_api.placement_context_manager.writer
def _set_traits(context, rp, traits):
"""Given a ResourceProvider object and a TraitList object, replaces the set
of traits associated with the resource provider.
    :raises: ResourceProviderConcurrentUpdateDetected if the resource
             provider's traits or inventory was changed in between the time
             when we first started to set traits and the end of this routine.
:param rp: The ResourceProvider object to set traits against
:param traits: A TraitList object or list of Trait objects
"""
# Get the internal IDs of our existing traits
existing_traits = _get_traits_by_provider_id(context, rp.id)
existing_traits = set(rec['id'] for rec in existing_traits)
want_traits = set(trait.id for trait in traits)
to_add = want_traits - existing_traits
to_delete = existing_traits - want_traits
if not to_add and not to_delete:
return
if to_delete:
_delete_traits_from_provider(context, rp.id, to_delete)
if to_add:
_add_traits_to_provider(context, rp.id, to_add)
rp.generation = _increment_provider_generation(context, rp)
@db_api.placement_context_manager.reader
def _has_child_providers(context, rp_id):
"""Returns True if the supplied resource provider has any child providers,
False otherwise
"""
child_sel = sa.select([_RP_TBL.c.id])
child_sel = child_sel.where(_RP_TBL.c.parent_provider_id == rp_id)
child_res = context.session.execute(child_sel.limit(1)).fetchone()
if child_res:
return True
return False
@db_api.placement_context_manager.writer
def _set_root_provider_id(context, rp_id, root_id):
"""Simply sets the root_provider_id value for a provider identified by
rp_id. Used in online data migration.
:param rp_id: Internal ID of the provider to update
:param root_id: Value to set root provider to
"""
upd = _RP_TBL.update().where(_RP_TBL.c.id == rp_id)
upd = upd.values(root_provider_id=root_id)
context.session.execute(upd)
ProviderIds = collections.namedtuple(
'ProviderIds', 'id uuid parent_id parent_uuid root_id root_uuid')
def _provider_ids_from_rp_ids(context, rp_ids):
"""Given an iterable of internal resource provider IDs, returns a dict,
    keyed by internal provider ID, of ProviderIds namedtuples describing
    those providers.
    :returns: dict, keyed by internal provider ID, of ProviderIds namedtuples
:param rp_ids: iterable of internal provider IDs to look up
"""
# SELECT
# rp.id, rp.uuid,
# parent.id AS parent_id, parent.uuid AS parent_uuid,
# root.id AS root_id, root.uuid AS root_uuid
# FROM resource_providers AS rp
# LEFT JOIN resource_providers AS parent
# ON rp.parent_provider_id = parent.id
# LEFT JOIN resource_providers AS root
# ON rp.root_provider_id = root.id
# WHERE rp.id IN ($rp_ids)
me = sa.alias(_RP_TBL, name="me")
parent = sa.alias(_RP_TBL, name="parent")
root = sa.alias(_RP_TBL, name="root")
cols = [
me.c.id,
me.c.uuid,
parent.c.id.label('parent_id'),
parent.c.uuid.label('parent_uuid'),
root.c.id.label('root_id'),
root.c.uuid.label('root_uuid'),
]
# TODO(jaypipes): Change this to an inner join when we are sure all
# root_provider_id values are NOT NULL
me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id)
me_to_parent = sa.outerjoin(me_to_root, parent,
me.c.parent_provider_id == parent.c.id)
sel = sa.select(cols).select_from(me_to_parent)
sel = sel.where(me.c.id.in_(rp_ids))
ret = {}
for r in context.session.execute(sel):
        # Use its own id/uuid for the root id/uuid if the root id/uuid is
        # None.
        # TODO(tetsuro): Remove this when we are sure all root_provider_id
        # values are NOT NULL
d = dict(r)
if d['root_id'] is None:
d['root_id'] = d['id']
d['root_uuid'] = d['uuid']
ret[d['id']] = ProviderIds(**d)
return ret
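# An illustrative return value (IDs and UUIDs made up): a child provider 7
# whose parent and root are both provider 5 maps to:
#
#     _provider_ids_from_rp_ids(ctx, [7])
#     # => {7: ProviderIds(id=7, uuid='uuid-7', parent_id=5,
#     #                    parent_uuid='uuid-5', root_id=5,
#     #                    root_uuid='uuid-5')}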
def _provider_ids_from_uuid(context, uuid):
"""Given the UUID of a resource provider, returns a namedtuple
(ProviderIds) with the internal ID, the UUID, the parent provider's
internal ID, parent provider's UUID, the root provider's internal ID and
the root provider UUID.
:returns: ProviderIds object containing the internal IDs and UUIDs of the
provider identified by the supplied UUID
:param uuid: The UUID of the provider to look up
"""
# SELECT
# rp.id, rp.uuid,
# parent.id AS parent_id, parent.uuid AS parent_uuid,
# root.id AS root_id, root.uuid AS root_uuid
# FROM resource_providers AS rp
# LEFT JOIN resource_providers AS parent
# ON rp.parent_provider_id = parent.id
# LEFT JOIN resource_providers AS root
# ON rp.root_provider_id = root.id
me = sa.alias(_RP_TBL, name="me")
parent = sa.alias(_RP_TBL, name="parent")
root = sa.alias(_RP_TBL, name="root")
cols = [
me.c.id,
me.c.uuid,
parent.c.id.label('parent_id'),
parent.c.uuid.label('parent_uuid'),
root.c.id.label('root_id'),
root.c.uuid.label('root_uuid'),
]
# TODO(jaypipes): Change this to an inner join when we are sure all
# root_provider_id values are NOT NULL
me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id)
me_to_parent = sa.outerjoin(me_to_root, parent,
me.c.parent_provider_id == parent.c.id)
sel = sa.select(cols).select_from(me_to_parent)
sel = sel.where(me.c.uuid == uuid)
res = context.session.execute(sel).fetchone()
if not res:
return None
return ProviderIds(**dict(res))
def _provider_ids_matching_aggregates(context, member_of, rp_ids=None):
"""Given a list of lists of aggregate UUIDs, return the internal IDs of all
resource providers associated with the aggregates.
:param member_of: A list containing lists of aggregate UUIDs. Each item in
the outer list is to be AND'd together. If that item contains multiple
values, they are OR'd together.
For example, if member_of is::
[
['agg1'],
['agg2', 'agg3'],
]
we will return all the resource providers that are
associated with agg1 as well as either (agg2 or agg3)
:param rp_ids: When present, returned resource providers are limited
to only those in this value
:returns: A set of internal resource provider IDs having all required
aggregate associations
"""
# Given a request for the following:
#
# member_of = [
# [agg1],
# [agg2],
# [agg3, agg4]
# ]
#
# we need to produce the following SQL expression:
#
# SELECT
# rp.id
# FROM resource_providers AS rp
# JOIN resource_provider_aggregates AS rpa1
# ON rp.id = rpa1.resource_provider_id
# AND rpa1.aggregate_id IN ($AGG1_ID)
# JOIN resource_provider_aggregates AS rpa2
# ON rp.id = rpa2.resource_provider_id
# AND rpa2.aggregate_id IN ($AGG2_ID)
# JOIN resource_provider_aggregates AS rpa3
# ON rp.id = rpa3.resource_provider_id
# AND rpa3.aggregate_id IN ($AGG3_ID, $AGG4_ID)
# # Only if we have rp_ids...
# WHERE rp.id IN ($RP_IDs)
    # First things first, get a map of all the aggregate UUIDs to internal
    # aggregate IDs
agg_uuids = set()
for members in member_of:
for member in members:
agg_uuids.add(member)
agg_tbl = sa.alias(_AGG_TBL, name='aggs')
agg_sel = sa.select([agg_tbl.c.uuid, agg_tbl.c.id])
agg_sel = agg_sel.where(agg_tbl.c.uuid.in_(agg_uuids))
agg_uuid_map = {
r[0]: r[1] for r in context.session.execute(agg_sel).fetchall()
}
rp_tbl = sa.alias(_RP_TBL, name='rp')
join_chain = rp_tbl
for x, members in enumerate(member_of):
rpa_tbl = sa.alias(_RP_AGG_TBL, name='rpa%d' % x)
agg_ids = [agg_uuid_map[member] for member in members
if member in agg_uuid_map]
if not agg_ids:
# This member_of list contains only non-existent aggregate UUIDs
# and therefore we will always return 0 results, so short-circuit
return []
join_cond = sa.and_(
rp_tbl.c.id == rpa_tbl.c.resource_provider_id,
rpa_tbl.c.aggregate_id.in_(agg_ids))
join_chain = sa.join(join_chain, rpa_tbl, join_cond)
sel = sa.select([rp_tbl.c.id]).select_from(join_chain)
if rp_ids:
sel = sel.where(rp_tbl.c.id.in_(rp_ids))
return set(r[0] for r in context.session.execute(sel))
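# An illustrative call showing the AND-of-ORs semantics of member_of
# (aggregate UUIDs made up): this returns the IDs of providers associated
# with agg1 AND with at least one of agg2 or agg3:
#
#     rp_ids = _provider_ids_matching_aggregates(
#         ctx, [['agg1'], ['agg2', 'agg3']])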
@db_api.placement_context_manager.writer
def _delete_rp_record(context, _id):
return context.session.query(models.ResourceProvider).\
filter(models.ResourceProvider.id == _id).\
delete(synchronize_session=False)
@base.VersionedObjectRegistry.register_if(False)
class ResourceProvider(base.VersionedObject, base.TimestampedObject):
SETTABLE_FIELDS = ('name', 'parent_provider_uuid')
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(nullable=False),
'name': fields.StringField(nullable=False),
'generation': fields.IntegerField(nullable=False),
# UUID of the root provider in a hierarchy of providers. Will be equal
# to the uuid field if this provider is the root provider of a
# hierarchy. This field is never manually set by the user. Instead, it
# is automatically set to either the root provider UUID of the parent
# or the UUID of the provider itself if there is no parent. This field
# is an optimization field that allows us to very quickly query for all
# providers within a particular tree without doing any recursive
# querying.
'root_provider_uuid': fields.UUIDField(nullable=False),
# UUID of the direct parent provider, or None if this provider is a
# "root" provider.
'parent_provider_uuid': fields.UUIDField(nullable=True, default=None),
}
def create(self):
if 'id' in self:
raise exception.ObjectActionError(action='create',
reason='already created')
if 'uuid' not in self:
raise exception.ObjectActionError(action='create',
reason='uuid is required')
if 'name' not in self:
raise exception.ObjectActionError(action='create',
reason='name is required')
if 'root_provider_uuid' in self:
raise exception.ObjectActionError(
action='create',
reason=_('root provider UUID cannot be manually set.'))
self.obj_set_defaults()
updates = self.obj_get_changes()
self._create_in_db(self._context, updates)
self.obj_reset_changes()
def destroy(self):
self._delete(self._context, self.id)
def save(self):
updates = self.obj_get_changes()
if updates and any(k not in self.SETTABLE_FIELDS
for k in updates.keys()):
raise exception.ObjectActionError(
action='save',
reason='Immutable fields changed')
self._update_in_db(self._context, self.id, updates)
self.obj_reset_changes()
@classmethod
def get_by_uuid(cls, context, uuid):
"""Returns a new ResourceProvider object with the supplied UUID.
:raises NotFound if no such provider could be found
:param uuid: UUID of the provider to search for
"""
rp_rec = _get_provider_by_uuid(context, uuid)
return cls._from_db_object(context, cls(), rp_rec)
def add_inventory(self, inventory):
"""Add one new Inventory to the resource provider.
Fails if Inventory of the provided resource class is
already present.
"""
_add_inventory(self._context, self, inventory)
self.obj_reset_changes()
def delete_inventory(self, resource_class):
"""Delete Inventory of provided resource_class."""
_delete_inventory(self._context, self, resource_class)
self.obj_reset_changes()
def set_inventory(self, inv_list):
"""Set all resource provider Inventory to be the provided list."""
exceeded = _set_inventory(self._context, self, inv_list)
for uuid, rclass in exceeded:
LOG.warning('Resource provider %(uuid)s is now over-'
'capacity for %(resource)s',
{'uuid': uuid, 'resource': rclass})
self.obj_reset_changes()
def update_inventory(self, inventory):
"""Update one existing Inventory of the same resource class.
Fails if no Inventory of the same class is present.
"""
exceeded = _update_inventory(self._context, self, inventory)
for uuid, rclass in exceeded:
LOG.warning('Resource provider %(uuid)s is now over-'
'capacity for %(resource)s',
{'uuid': uuid, 'resource': rclass})
self.obj_reset_changes()
def get_aggregates(self):
"""Get the aggregate uuids associated with this resource provider."""
return _get_aggregates_by_provider_id(self._context, self.id)
def set_aggregates(self, aggregate_uuids, increment_generation=False):
"""Set the aggregate uuids associated with this resource provider.
If an aggregate does not exist, one will be created using the
provided uuid.
The resource provider generation is incremented if and only if the
increment_generation parameter is True.
"""
_set_aggregates(self._context, self, aggregate_uuids,
increment_generation=increment_generation)
def set_traits(self, traits):
"""Replaces the set of traits associated with the resource provider
with the given list of Trait objects.
:param traits: A list of Trait objects representing the traits to
associate with the provider.
"""
_set_traits(self._context, self, traits)
self.obj_reset_changes()
@db_api.placement_context_manager.writer
def _create_in_db(self, context, updates):
parent_id = None
root_id = None
# User supplied a parent, let's make sure it exists
parent_uuid = updates.pop('parent_provider_uuid')
if parent_uuid is not None:
# Setting parent to ourselves doesn't make any sense
if parent_uuid == self.uuid:
raise exception.ObjectActionError(
action='create',
                reason=_('parent provider UUID cannot be the same as the '
                         'provider UUID. Please set the parent provider '
                         'UUID to None if there is no parent.'))
parent_ids = _provider_ids_from_uuid(context, parent_uuid)
if parent_ids is None:
raise exception.ObjectActionError(
action='create',
reason=_('parent provider UUID does not exist.'))
parent_id = parent_ids.id
root_id = parent_ids.root_id
updates['root_provider_id'] = root_id
updates['parent_provider_id'] = parent_id
self.root_provider_uuid = parent_ids.root_uuid
db_rp = models.ResourceProvider()
db_rp.update(updates)
context.session.add(db_rp)
context.session.flush()
self.id = db_rp.id
self.generation = db_rp.generation
if root_id is None:
# User did not specify a parent when creating this provider, so the
# root_provider_id needs to be set to this provider's newly-created
# internal ID
db_rp.root_provider_id = db_rp.id
context.session.add(db_rp)
context.session.flush()
self.root_provider_uuid = self.uuid
@staticmethod
@db_api.placement_context_manager.writer
def _delete(context, _id):
# Do a quick check to see if the provider is a parent. If it is, don't
# allow deleting the provider. Note that the foreign key constraint on
# resource_providers.parent_provider_id will prevent deletion of the
# parent within the transaction below. This is just a quick
# short-circuit outside of the transaction boundary.
if _has_child_providers(context, _id):
raise exception.CannotDeleteParentResourceProvider()
# Don't delete the resource provider if it has allocations.
rp_allocations = context.session.query(models.Allocation).\
filter(models.Allocation.resource_provider_id == _id).\
count()
if rp_allocations:
raise exception.ResourceProviderInUse()
# Delete any inventory associated with the resource provider
context.session.query(models.Inventory).\
filter(models.Inventory.resource_provider_id == _id).\
delete(synchronize_session=False)
# Delete any aggregate associations for the resource provider
# The name substitution on the next line is needed to satisfy pep8
RPA_model = models.ResourceProviderAggregate
context.session.query(RPA_model).\
filter(RPA_model.resource_provider_id == _id).delete()
# delete any trait associations for the resource provider
RPT_model = models.ResourceProviderTrait
context.session.query(RPT_model).\
filter(RPT_model.resource_provider_id == _id).delete()
# set root_provider_id to null to make deletion possible
context.session.query(models.ResourceProvider).\
filter(models.ResourceProvider.id == _id,
models.ResourceProvider.root_provider_id == _id).\
update({'root_provider_id': None})
# Now delete the RP record
try:
result = _delete_rp_record(context, _id)
except sqla_exc.IntegrityError:
# NOTE(jaypipes): Another thread snuck in and parented this
# resource provider in between the above check for
# _has_child_providers() and our attempt to delete the record
raise exception.CannotDeleteParentResourceProvider()
if not result:
raise exception.NotFound()
@db_api.placement_context_manager.writer
def _update_in_db(self, context, id, updates):
# A list of resource providers in the same tree with the
# resource provider to update
same_tree = []
if 'parent_provider_uuid' in updates:
# TODO(jaypipes): For now, "re-parenting" and "un-parenting" are
# not possible. If the provider already had a parent, we don't
# allow changing that parent due to various issues, including:
#
# * if the new parent is a descendant of this resource provider, we
# introduce the possibility of a loop in the graph, which would
# be very bad
# * potentially orphaning heretofore-descendants
#
# So, for now, let's just prevent re-parenting...
my_ids = _provider_ids_from_uuid(context, self.uuid)
parent_uuid = updates.pop('parent_provider_uuid')
if parent_uuid is not None:
parent_ids = _provider_ids_from_uuid(context, parent_uuid)
# User supplied a parent, let's make sure it exists
if parent_ids is None:
raise exception.ObjectActionError(
                        action='update',
reason=_('parent provider UUID does not exist.'))
if (my_ids.parent_id is not None and
my_ids.parent_id != parent_ids.id):
raise exception.ObjectActionError(
action='update',
reason=_('re-parenting a provider is not '
'currently allowed.'))
if my_ids.parent_uuid is None:
                    # So the user specified a parent for an RP that doesn't
                    # have one. We have to check that this new parent does
                    # not create a loop in the tree. Basically, the new
                    # parent cannot be the RP itself or one of its
                    # descendants. However, as the RP's current parent is
                    # None, the above condition is the same as "the new
                    # parent cannot be any RP from the current RP tree".
same_tree = ResourceProviderList.get_all_by_filters(
context,
filters={'in_tree': self.uuid})
rp_uuids_in_the_same_tree = [rp.uuid for rp in same_tree]
if parent_uuid in rp_uuids_in_the_same_tree:
raise exception.ObjectActionError(
action='update',
reason=_('creating loop in the provider tree is '
'not allowed.'))
updates['root_provider_id'] = parent_ids.root_id
updates['parent_provider_id'] = parent_ids.id
self.root_provider_uuid = parent_ids.root_uuid
else:
if my_ids.parent_id is not None:
raise exception.ObjectActionError(
action='update',
reason=_('un-parenting a provider is not '
'currently allowed.'))
db_rp = context.session.query(models.ResourceProvider).filter_by(
id=id).first()
db_rp.update(updates)
context.session.add(db_rp)
        # We should also update the root providers of the resource providers
        # that were originally in the same tree. If re-parenting is ever
        # supported, this logic should be changed to update only descendants
        # of the re-parented resource provider, not all providers in the
        # tree.
for rp in same_tree:
            # If the parent was not updated, this loop body never runs
            # because same_tree is empty.
rp.root_provider_uuid = parent_ids.root_uuid
db_rp = context.session.query(
models.ResourceProvider).filter_by(id=rp.id).first()
data = {'root_provider_id': parent_ids.root_id}
db_rp.update(data)
context.session.add(db_rp)
try:
context.session.flush()
except sqla_exc.IntegrityError:
# NOTE(jaypipes): Another thread snuck in and deleted the parent
# for this resource provider in between the above check for a valid
# parent provider and here...
raise exception.ObjectActionError(
action='update',
reason=_('parent provider UUID does not exist.'))
@staticmethod
@db_api.placement_context_manager.writer # For online data migration
def _from_db_object(context, resource_provider, db_resource_provider):
# Online data migration to populate root_provider_id
# TODO(jaypipes): Remove when all root_provider_id values are NOT NULL
if db_resource_provider['root_provider_uuid'] is None:
rp_id = db_resource_provider['id']
uuid = db_resource_provider['uuid']
db_resource_provider['root_provider_uuid'] = uuid
_set_root_provider_id(context, rp_id, rp_id)
for field in resource_provider.fields:
setattr(resource_provider, field, db_resource_provider[field])
resource_provider._context = context
resource_provider.obj_reset_changes()
return resource_provider
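# A minimal usage sketch of the ResourceProvider object above, assuming a
# placement RequestContext `ctx` (illustrative only; the real callers are
# the placement HTTP handlers):
#
#     from oslo_utils import uuidutils
#
#     rp = ResourceProvider(ctx, uuid=uuidutils.generate_uuid(),
#                           name='compute-0')
#     rp.create()
#     inv = Inventory(ctx, resource_provider=rp, resource_class='VCPU',
#                     total=16, reserved=0, min_unit=1, max_unit=16,
#                     step_size=1, allocation_ratio=16.0)
#     rp.set_inventory(InventoryList(ctx, objects=[inv]))  # bumps generation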
@db_api.placement_context_manager.reader
def _get_providers_with_shared_capacity(ctx, rc_id, amount, member_of=None):
"""Returns a list of resource provider IDs (internal IDs, not UUIDs)
that have capacity for a requested amount of a resource and indicate that
    they share resources via an aggregate association.
Shared resource providers are marked with a standard trait called
MISC_SHARES_VIA_AGGREGATE. This indicates that the provider allows its
inventory to be consumed by other resource providers associated via an
aggregate link.
For example, assume we have two compute nodes, CN_1 and CN_2, each with
inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these are
compute nodes with no local disk). There is a resource provider called
"NFS_SHARE" that has an inventory of DISK_GB and has the
MISC_SHARES_VIA_AGGREGATE trait. Both the "CN_1" and "CN_2" compute node
resource providers and the "NFS_SHARE" resource provider are associated
with an aggregate called "AGG_1".
The scheduler needs to determine the resource providers that can fulfill a
request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.
Clearly, no single provider can satisfy the request for all three
resources, since neither compute node has DISK_GB inventory and the
NFS_SHARE provider has no VCPU or MEMORY_MB inventories.
However, if we consider the NFS_SHARE resource provider as providing
inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and CN_2
as potential fits for the requested set of resources.
To facilitate that matching query, this function returns all providers that
indicate they share their inventory with providers in some aggregate and
have enough capacity for the requested amount of a resource.
To follow the example above, if we were to call
_get_providers_with_shared_capacity(ctx, "DISK_GB", 100), we would want to
get back the ID for the NFS_SHARE resource provider.
:param rc_id: Internal ID of the requested resource class.
:param amount: Amount of the requested resource.
:param member_of: When present, contains a list of lists of aggregate
uuids that are used to filter the returned list of
resource providers that *directly* belong to the
aggregates referenced.
"""
# The SQL we need to generate here looks like this:
#
# SELECT rp.id
# FROM resource_providers AS rp
# INNER JOIN resource_provider_traits AS rpt
# ON rp.id = rpt.resource_provider_id
# INNER JOIN traits AS t
# ON rpt.trait_id = t.id
# AND t.name = "MISC_SHARES_VIA_AGGREGATE"
# INNER JOIN inventories AS inv
# ON rp.id = inv.resource_provider_id
# AND inv.resource_class_id = $rc_id
# LEFT JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = $rc_id
# GROUP BY resource_provider_id
# ) AS usage
# ON rp.id = usage.resource_provider_id
    # WHERE COALESCE(usage.used, 0) + $amount <= (
    #   inv.total - inv.reserved) * inv.allocation_ratio
    # AND inv.min_unit <= $amount
    # AND inv.max_unit >= $amount
    # AND $amount % inv.step_size = 0
# GROUP BY rp.id
rp_tbl = sa.alias(_RP_TBL, name='rp')
inv_tbl = sa.alias(_INV_TBL, name='inv')
t_tbl = sa.alias(_TRAIT_TBL, name='t')
rpt_tbl = sa.alias(_RP_TRAIT_TBL, name='rpt')
rp_to_rpt_join = sa.join(
rp_tbl, rpt_tbl,
rp_tbl.c.id == rpt_tbl.c.resource_provider_id,
)
rpt_to_t_join = sa.join(
rp_to_rpt_join, t_tbl,
sa.and_(
rpt_tbl.c.trait_id == t_tbl.c.id,
# The traits table wants unicode trait names, but os_traits
# presents native str, so we need to cast.
t_tbl.c.name == six.text_type(os_traits.MISC_SHARES_VIA_AGGREGATE),
),
)
rp_to_inv_join = sa.join(
rpt_to_t_join, inv_tbl,
sa.and_(
rpt_tbl.c.resource_provider_id == inv_tbl.c.resource_provider_id,
inv_tbl.c.resource_class_id == rc_id,
),
)
usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
sql.func.sum(_ALLOC_TBL.c.used).label('used')])
usage = usage.where(_ALLOC_TBL.c.resource_class_id == rc_id)
usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id)
usage = sa.alias(usage, name='usage')
inv_to_usage_join = sa.outerjoin(
rp_to_inv_join, usage,
inv_tbl.c.resource_provider_id == usage.c.resource_provider_id,
)
where_conds = sa.and_(
func.coalesce(usage.c.used, 0) + amount <= (
inv_tbl.c.total - inv_tbl.c.reserved) * inv_tbl.c.allocation_ratio,
inv_tbl.c.min_unit <= amount,
inv_tbl.c.max_unit >= amount,
amount % inv_tbl.c.step_size == 0)
# If 'member_of' has values, do a separate lookup to identify the
# resource providers that meet the member_of constraints.
if member_of:
rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of)
if not rps_in_aggs:
# Short-circuit. The user either asked for a non-existing
# aggregate or there were no resource providers that matched
# the requirements...
return []
where_conds.append(rp_tbl.c.id.in_(rps_in_aggs))
sel = sa.select([rp_tbl.c.id]).select_from(inv_to_usage_join)
sel = sel.where(where_conds)
sel = sel.group_by(rp_tbl.c.id)
return [r[0] for r in ctx.session.execute(sel)]
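# Following the NFS_SHARE example in the docstring above, an illustrative
# call (the resource class name is resolved to its internal ID first):
#
#     disk_rc_id = _RC_CACHE.id_from_string('DISK_GB')
#     sharing_rp_ids = _get_providers_with_shared_capacity(
#         ctx, disk_rc_id, 100)
#     # => [<internal ID of NFS_SHARE>] if it has at least 100 DISK_GB free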
@base.VersionedObjectRegistry.register_if(False)
class ResourceProviderList(base.ObjectListBase, base.VersionedObject):
fields = {
'objects': fields.ListOfObjectsField('ResourceProvider'),
}
@staticmethod
@db_api.placement_context_manager.reader
def _get_all_by_filters_from_db(context, filters):
        # E.g., filters can be:
# filters = {
# 'name': <name>,
# 'uuid': <uuid>,
# 'member_of': [[<aggregate_uuid>, <aggregate_uuid>],
# [<aggregate_uuid>]]
# 'resources': {
# 'VCPU': 1,
# 'MEMORY_MB': 1024
# },
# 'in_tree': <uuid>,
# 'required': [<trait_name>, ...]
# }
if not filters:
filters = {}
else:
# Since we modify the filters, copy them so that we don't modify
# them in the calling program.
filters = copy.deepcopy(filters)
name = filters.pop('name', None)
uuid = filters.pop('uuid', None)
member_of = filters.pop('member_of', [])
required = set(filters.pop('required', []))
forbidden = set([trait for trait in required
if trait.startswith('!')])
required = required - forbidden
forbidden = set([trait.lstrip('!') for trait in forbidden])
resources = filters.pop('resources', {})
# NOTE(sbauza): We want to key the dict by the resource class IDs
# and we want to make sure those class names aren't incorrect.
resources = {_RC_CACHE.id_from_string(r_name): amount
for r_name, amount in resources.items()}
rp = sa.alias(_RP_TBL, name="rp")
root_rp = sa.alias(_RP_TBL, name="root_rp")
parent_rp = sa.alias(_RP_TBL, name="parent_rp")
cols = [
rp.c.id,
rp.c.uuid,
rp.c.name,
rp.c.generation,
rp.c.updated_at,
rp.c.created_at,
root_rp.c.uuid.label("root_provider_uuid"),
parent_rp.c.uuid.label("parent_provider_uuid"),
]
# TODO(jaypipes): Convert this to an inner join once all
# root_provider_id values are NOT NULL
rp_to_root = sa.outerjoin(rp, root_rp,
rp.c.root_provider_id == root_rp.c.id)
rp_to_parent = sa.outerjoin(rp_to_root, parent_rp,
rp.c.parent_provider_id == parent_rp.c.id)
query = sa.select(cols).select_from(rp_to_parent)
if name:
query = query.where(rp.c.name == name)
if uuid:
query = query.where(rp.c.uuid == uuid)
if 'in_tree' in filters:
# The 'in_tree' parameter is the UUID of a resource provider that
# the caller wants to limit the returned providers to only those
# within its "provider tree". So, we look up the resource provider
# having the UUID specified by the 'in_tree' parameter and grab the
# root_provider_id value of that record. We can then ask for only
# those resource providers having a root_provider_id of that value.
tree_uuid = filters.pop('in_tree')
tree_ids = _provider_ids_from_uuid(context, tree_uuid)
if tree_ids is None:
# List operations should simply return an empty list when a
# non-existing resource provider UUID is given.
return []
root_id = tree_ids.root_id
# TODO(jaypipes): Remove this OR condition when root_provider_id
# is not nullable in the database and all resource provider records
# have populated the root provider ID.
where_cond = sa.or_(rp.c.id == root_id,
rp.c.root_provider_id == root_id)
query = query.where(where_cond)
# If 'member_of' has values, do a separate lookup to identify the
# resource providers that meet the member_of constraints.
if member_of:
rps_in_aggs = _provider_ids_matching_aggregates(context, member_of)
if not rps_in_aggs:
# Short-circuit. The user either asked for a non-existing
# aggregate or there were no resource providers that matched
# the requirements...
return []
query = query.where(rp.c.id.in_(rps_in_aggs))
# If 'required' has values, add a filter to limit results to providers
# possessing *all* of the listed traits.
if required:
trait_map = _trait_ids_from_names(context, required)
if len(trait_map) != len(required):
missing = required - set(trait_map)
raise exception.TraitNotFound(names=', '.join(missing))
rp_ids = _get_provider_ids_having_all_traits(context, trait_map)
if not rp_ids:
# If no providers have the required traits, we're done
return []
query = query.where(rp.c.id.in_(rp_ids))
        # If 'forbidden' has values, filter out those providers that have
        # any of the forbidden traits.
if forbidden:
trait_map = _trait_ids_from_names(context, forbidden)
if len(trait_map) != len(forbidden):
missing = forbidden - set(trait_map)
raise exception.TraitNotFound(names=', '.join(missing))
rp_ids = _get_provider_ids_having_any_trait(context, trait_map)
if rp_ids:
query = query.where(~rp.c.id.in_(rp_ids))
if not resources:
            # Return the list right away in case we don't need to check
            # resource usage
res = context.session.execute(query).fetchall()
return [dict(r) for r in res]
# NOTE(sbauza): In case we want to look at the resource criteria, then
# the SQL generated from this case looks something like:
# SELECT
# rp.*
# FROM resource_providers AS rp
# JOIN inventories AS inv
# ON rp.id = inv.resource_provider_id
# LEFT JOIN (
# SELECT resource_provider_id, resource_class_id, SUM(used) AS used
# FROM allocations
# WHERE resource_class_id IN ($RESOURCE_CLASSES)
# GROUP BY resource_provider_id, resource_class_id
# ) AS usage
# ON inv.resource_provider_id = usage.resource_provider_id
# AND inv.resource_class_id = usage.resource_class_id
# AND (inv.resource_class_id = $X AND (used + $AMOUNT_X <= (
# total - reserved) * inv.allocation_ratio) AND
# inv.min_unit <= $AMOUNT_X AND inv.max_unit >= $AMOUNT_X AND
# $AMOUNT_X % inv.step_size == 0)
# OR (inv.resource_class_id = $Y AND (used + $AMOUNT_Y <= (
# total - reserved) * inv.allocation_ratio) AND
# inv.min_unit <= $AMOUNT_Y AND inv.max_unit >= $AMOUNT_Y AND
# $AMOUNT_Y % inv.step_size == 0)
# OR (inv.resource_class_id = $Z AND (used + $AMOUNT_Z <= (
# total - reserved) * inv.allocation_ratio) AND
# inv.min_unit <= $AMOUNT_Z AND inv.max_unit >= $AMOUNT_Z AND
# $AMOUNT_Z % inv.step_size == 0))
# GROUP BY rp.id
# HAVING
# COUNT(DISTINCT(inv.resource_class_id)) == len($RESOURCE_CLASSES)
#
# with a possible additional WHERE clause for the name and uuid that
# comes from the above filters
# First JOIN between inventories and RPs is here
inv_join = sa.join(rp_to_parent, _INV_TBL,
rp.c.id == _INV_TBL.c.resource_provider_id)
# Now, below is the LEFT JOIN for getting the allocations usage
usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id,
sql.func.sum(_ALLOC_TBL.c.used).label('used')])
usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(resources))
usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id)
usage = sa.alias(usage, name='usage')
usage_join = sa.outerjoin(inv_join, usage,
sa.and_(
usage.c.resource_provider_id == (
_INV_TBL.c.resource_provider_id),
usage.c.resource_class_id == _INV_TBL.c.resource_class_id))
        # And finally, we verify for each resource class that the requested
        # amount isn't more than the remaining capacity (considering the
        # allocation ratio, the reserved space and the min and max unit
        # sizes)
where_clauses = [
sa.and_(
_INV_TBL.c.resource_class_id == r_idx,
(func.coalesce(usage.c.used, 0) + amount <= (
_INV_TBL.c.total - _INV_TBL.c.reserved
) * _INV_TBL.c.allocation_ratio),
_INV_TBL.c.min_unit <= amount,
_INV_TBL.c.max_unit >= amount,
amount % _INV_TBL.c.step_size == 0
)
for (r_idx, amount) in resources.items()]
query = query.select_from(usage_join)
query = query.where(sa.or_(*where_clauses))
query = query.group_by(rp.c.id, root_rp.c.uuid, parent_rp.c.uuid)
        # NOTE(sbauza): Only RPs that have all the requested resources can
        # be returned
query = query.having(sql.func.count(
sa.distinct(_INV_TBL.c.resource_class_id)) == len(resources))
res = context.session.execute(query).fetchall()
return [dict(r) for r in res]
@classmethod
def get_all_by_filters(cls, context, filters=None):
"""Returns a list of `ResourceProvider` objects that have sufficient
resources in their inventories to satisfy the amounts specified in the
`filters` parameter.
If no resource providers can be found, the function will return an
empty list.
:param context: `nova.context.RequestContext` that may be used to grab
a DB connection.
        :param filters: Can be `name`, `uuid`, `member_of`, `in_tree`,
                        `resources` or `required`, where `member_of` is a
                        list of lists of aggregate UUIDs, `in_tree` is a
                        UUID of a resource provider that we can use to find
                        the root provider ID of the tree of providers to
                        filter results by, `resources` is a dict of amounts
                        keyed by resource classes, and `required` is a list
                        of trait names (prefix a name with '!' to forbid
                        that trait).
:type filters: dict
"""
resource_providers = cls._get_all_by_filters_from_db(context, filters)
return base.obj_make_list(context, cls(context),
ResourceProvider, resource_providers)
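# An illustrative filters dict for get_all_by_filters (values made up). The
# '!' prefix in 'required' marks a forbidden trait:
#
#     rps = ResourceProviderList.get_all_by_filters(
#         ctx,
#         filters={
#             'resources': {'VCPU': 2, 'MEMORY_MB': 1024},
#             'member_of': [['agg-uuid-1']],
#             'required': ['HW_CPU_X86_AVX', '!CUSTOM_NOISY'],
#         })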
@base.VersionedObjectRegistry.register_if(False)
class Inventory(base.VersionedObject, base.TimestampedObject):
fields = {
'id': fields.IntegerField(read_only=True),
'resource_provider': fields.ObjectField('ResourceProvider'),
'resource_class': rc_fields.ResourceClassField(read_only=True),
'total': fields.NonNegativeIntegerField(),
'reserved': fields.NonNegativeIntegerField(default=0),
'min_unit': fields.NonNegativeIntegerField(default=1),
'max_unit': fields.NonNegativeIntegerField(default=1),
'step_size': fields.NonNegativeIntegerField(default=1),
'allocation_ratio': fields.NonNegativeFloatField(default=1.0),
}
@property
def capacity(self):
"""Inventory capacity, adjusted by allocation_ratio."""
return int((self.total - self.reserved) * self.allocation_ratio)
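# A worked example of the capacity formula above: with total=32, reserved=2
# and allocation_ratio=16.0, capacity = int((32 - 2) * 16.0) = 480 units
# may be allocated against this inventory before it is exhausted.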
@db_api.placement_context_manager.reader
def _get_inventory_by_provider_id(ctx, rp_id):
inv = sa.alias(_INV_TBL, name="i")
cols = [
inv.c.resource_class_id,
inv.c.total,
inv.c.reserved,
inv.c.min_unit,
inv.c.max_unit,
inv.c.step_size,
inv.c.allocation_ratio,
inv.c.updated_at,
inv.c.created_at,
]
sel = sa.select(cols)
sel = sel.where(inv.c.resource_provider_id == rp_id)
return [dict(r) for r in ctx.session.execute(sel)]
@base.VersionedObjectRegistry.register_if(False)
class InventoryList(base.ObjectListBase, base.VersionedObject):
fields = {
'objects': fields.ListOfObjectsField('Inventory'),
}
def find(self, res_class):
"""Return the inventory record from the list of Inventory records that
matches the supplied resource class, or None.
        :param res_class: A string name of the resource class to find. It is
                          compared directly against the resource_class field
                          of each Inventory record in the list.
        :raises ValueError: if res_class is not a string.
        """
        if not isinstance(res_class, six.string_types):
            raise ValueError('res_class must be a string, got %r' % res_class)
for inv_rec in self.objects:
if inv_rec.resource_class == res_class:
return inv_rec
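    # Usage sketch (hypothetical): inv_list.find('VCPU') returns the VCPU
    # Inventory record, or None when the provider has no VCPU inventory;
    # passing anything other than a string raises ValueError.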
@classmethod
def get_all_by_resource_provider(cls, context, rp):
db_inv = _get_inventory_by_provider_id(context, rp.id)
# Build up a list of Inventory objects, setting the Inventory object
# fields to the same-named database record field we got from
# _get_inventory_by_provider_id(). We already have the ResourceProvider
# object so we just pass that object to the Inventory object
# constructor as-is
objs = [
Inventory(
context, resource_provider=rp,
resource_class=_RC_CACHE.string_from_id(
rec['resource_class_id']),
**rec)
for rec in db_inv
]
inv_list = cls(context, objects=objs)
return inv_list
@base.VersionedObjectRegistry.register_if(False)
class Allocation(base.VersionedObject, base.TimestampedObject):
fields = {
'id': fields.IntegerField(),
'resource_provider': fields.ObjectField('ResourceProvider'),
'consumer': fields.ObjectField('Consumer', nullable=False),
'resource_class': rc_fields.ResourceClassField(),
'used': fields.IntegerField(),
}
@db_api.placement_context_manager.writer
def _delete_allocations_for_consumer(ctx, consumer_id):
"""Deletes any existing allocations that correspond to the allocations to
be written. This is wrapped in a transaction, so if the write subsequently
fails, the deletion will also be rolled back.
"""
del_sql = _ALLOC_TBL.delete().where(
_ALLOC_TBL.c.consumer_id == consumer_id)
ctx.session.execute(del_sql)
@db_api.placement_context_manager.writer
def _delete_allocations_by_ids(ctx, alloc_ids):
"""Deletes allocations having an internal id value in the set of supplied
IDs
"""
del_sql = _ALLOC_TBL.delete().where(_ALLOC_TBL.c.id.in_(alloc_ids))
ctx.session.execute(del_sql)
def _check_capacity_exceeded(ctx, allocs):
"""Checks to see if the supplied allocation records would result in any of
the inventories involved having their capacity exceeded.
Raises an InvalidAllocationCapacityExceeded exception if any inventory
would be exhausted by the allocation. Raises an
InvalidAllocationConstraintsViolated exception if any of the `step_size`,
`min_unit` or `max_unit` constraints in an inventory will be violated
by any one of the allocations.
    If no inventories would be exceeded or violated by the allocations, the
    function returns a dict, keyed by resource provider UUID, of
    `ResourceProvider` objects that contain the generation at the time of
    the check.
:param ctx: `nova.context.RequestContext` that has an oslo_db Session
:param allocs: List of `Allocation` objects to check
"""
# The SQL generated below looks like this:
# SELECT
# rp.id,
# rp.uuid,
# rp.generation,
# inv.resource_class_id,
# inv.total,
# inv.reserved,
    #   inv.allocation_ratio,
    #   inv.min_unit,
    #   inv.max_unit,
    #   inv.step_size,
    #   allocs.used
    # FROM resource_providers AS rp
    # JOIN inventories AS inv
    # ON rp.id = inv.resource_provider_id
# LEFT JOIN (
# SELECT resource_provider_id, resource_class_id, SUM(used) AS used
# FROM allocations
# WHERE resource_class_id IN ($RESOURCE_CLASSES)
# AND resource_provider_id IN ($RESOURCE_PROVIDERS)
# GROUP BY resource_provider_id, resource_class_id
# ) AS allocs
# ON inv.resource_provider_id = allocs.resource_provider_id
# AND inv.resource_class_id = allocs.resource_class_id
# WHERE rp.id IN ($RESOURCE_PROVIDERS)
# AND inv.resource_class_id IN ($RESOURCE_CLASSES)
#
# We then take the results of the above and determine if any of the
# inventory will have its capacity exceeded.
rc_ids = set([_RC_CACHE.id_from_string(a.resource_class)
for a in allocs])
provider_uuids = set([a.resource_provider.uuid for a in allocs])
provider_ids = set([a.resource_provider.id for a in allocs])
usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id,
sql.func.sum(_ALLOC_TBL.c.used).label('used')])
usage = usage.where(
sa.and_(_ALLOC_TBL.c.resource_class_id.in_(rc_ids),
_ALLOC_TBL.c.resource_provider_id.in_(provider_ids)))
usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
_ALLOC_TBL.c.resource_class_id)
usage = sa.alias(usage, name='usage')
inv_join = sql.join(_RP_TBL, _INV_TBL,
sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id,
_INV_TBL.c.resource_class_id.in_(rc_ids)))
primary_join = sql.outerjoin(inv_join, usage,
sql.and_(
_INV_TBL.c.resource_provider_id == usage.c.resource_provider_id,
_INV_TBL.c.resource_class_id == usage.c.resource_class_id)
)
cols_in_output = [
_RP_TBL.c.id.label('resource_provider_id'),
_RP_TBL.c.uuid,
_RP_TBL.c.generation,
_INV_TBL.c.resource_class_id,
_INV_TBL.c.total,
_INV_TBL.c.reserved,
_INV_TBL.c.allocation_ratio,
_INV_TBL.c.min_unit,
_INV_TBL.c.max_unit,
_INV_TBL.c.step_size,
usage.c.used,
]
sel = sa.select(cols_in_output).select_from(primary_join)
sel = sel.where(
sa.and_(_RP_TBL.c.id.in_(provider_ids),
_INV_TBL.c.resource_class_id.in_(rc_ids)))
records = ctx.session.execute(sel)
# Create a map keyed by (rp_uuid, res_class) for the records in the DB
usage_map = {}
provs_with_inv = set()
for record in records:
map_key = (record['uuid'], record['resource_class_id'])
if map_key in usage_map:
raise KeyError("%s already in usage_map, bad query" % str(map_key))
usage_map[map_key] = record
provs_with_inv.add(record["uuid"])
# Ensure that all providers have existing inventory
missing_provs = provider_uuids - provs_with_inv
if missing_provs:
class_str = ', '.join([_RC_CACHE.string_from_id(rc_id)
for rc_id in rc_ids])
provider_str = ', '.join(missing_provs)
raise exception.InvalidInventory(resource_class=class_str,
resource_provider=provider_str)
res_providers = {}
rp_resource_class_sum = collections.defaultdict(
lambda: collections.defaultdict(int))
for alloc in allocs:
rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
rp_uuid = alloc.resource_provider.uuid
if rp_uuid not in res_providers:
res_providers[rp_uuid] = alloc.resource_provider
amount_needed = alloc.used
rp_resource_class_sum[rp_uuid][rc_id] += amount_needed
# No use checking usage if we're not asking for anything
if amount_needed == 0:
continue
key = (rp_uuid, rc_id)
try:
usage = usage_map[key]
except KeyError:
# The resource class at rc_id is not in the usage map.
raise exception.InvalidInventory(
resource_class=alloc.resource_class,
resource_provider=rp_uuid)
allocation_ratio = usage['allocation_ratio']
min_unit = usage['min_unit']
max_unit = usage['max_unit']
step_size = usage['step_size']
# check min_unit, max_unit, step_size
if (amount_needed < min_unit or amount_needed > max_unit or
amount_needed % step_size != 0):
LOG.warning(
"Allocation for %(rc)s on resource provider %(rp)s "
"violates min_unit, max_unit, or step_size. "
"Requested: %(requested)s, min_unit: %(min_unit)s, "
"max_unit: %(max_unit)s, step_size: %(step_size)s",
{'rc': alloc.resource_class,
'rp': rp_uuid,
'requested': amount_needed,
'min_unit': min_unit,
'max_unit': max_unit,
'step_size': step_size})
raise exception.InvalidAllocationConstraintsViolated(
resource_class=alloc.resource_class,
resource_provider=rp_uuid)
# usage["used"] can be returned as None
used = usage['used'] or 0
capacity = (usage['total'] - usage['reserved']) * allocation_ratio
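        # Worked example (hypothetical numbers): total=8, reserved=0 and
        # allocation_ratio=16.0 yield capacity=128.0; if used=120 then an
        # amount_needed of 16 fails the check since 120 + 16 > 128.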
if (capacity < (used + amount_needed) or
capacity < (used + rp_resource_class_sum[rp_uuid][rc_id])):
LOG.warning(
"Over capacity for %(rc)s on resource provider %(rp)s. "
"Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s",
{'rc': alloc.resource_class,
'rp': rp_uuid,
'needed': amount_needed,
'used': used,
'cap': capacity})
raise exception.InvalidAllocationCapacityExceeded(
resource_class=alloc.resource_class,
resource_provider=rp_uuid)
return res_providers
@db_api.placement_context_manager.reader
def _get_allocations_by_provider_id(ctx, rp_id):
allocs = sa.alias(_ALLOC_TBL, name="a")
consumers = sa.alias(_CONSUMER_TBL, name="c")
projects = sa.alias(_PROJECT_TBL, name="p")
users = sa.alias(_USER_TBL, name="u")
cols = [
allocs.c.id,
allocs.c.resource_class_id,
allocs.c.used,
allocs.c.updated_at,
allocs.c.created_at,
consumers.c.id.label("consumer_id"),
consumers.c.generation.label("consumer_generation"),
sql.func.coalesce(
consumers.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
projects.c.id.label("project_id"),
projects.c.external_id.label("project_external_id"),
users.c.id.label("user_id"),
users.c.external_id.label("user_external_id"),
]
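    # NOTE: the coalesce above is a defensive fallback to the raw
    # allocations.consumer_id value; with the inner join on consumers below
    # it is normally consumers.uuid that is returned.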
# TODO(jaypipes): change this join to be on ID not UUID
consumers_join = sa.join(
allocs, consumers, allocs.c.consumer_id == consumers.c.uuid)
projects_join = sa.join(
consumers_join, projects, consumers.c.project_id == projects.c.id)
users_join = sa.join(
projects_join, users, consumers.c.user_id == users.c.id)
sel = sa.select(cols).select_from(users_join)
sel = sel.where(allocs.c.resource_provider_id == rp_id)
return [dict(r) for r in ctx.session.execute(sel)]
@db_api.placement_context_manager.reader
def _get_allocations_by_consumer_uuid(ctx, consumer_uuid):
allocs = sa.alias(_ALLOC_TBL, name="a")
rp = sa.alias(_RP_TBL, name="rp")
consumer = sa.alias(_CONSUMER_TBL, name="c")
project = sa.alias(_PROJECT_TBL, name="p")
user = sa.alias(_USER_TBL, name="u")
cols = [
allocs.c.id,
allocs.c.resource_provider_id,
rp.c.name.label("resource_provider_name"),
rp.c.uuid.label("resource_provider_uuid"),
rp.c.generation.label("resource_provider_generation"),
allocs.c.resource_class_id,
allocs.c.used,
consumer.c.id.label("consumer_id"),
consumer.c.generation.label("consumer_generation"),
sql.func.coalesce(
consumer.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
project.c.id.label("project_id"),
project.c.external_id.label("project_external_id"),
user.c.id.label("user_id"),
user.c.external_id.label("user_external_id"),
]
# Build up the joins of the five tables we need to interact with.
rp_join = sa.join(allocs, rp, allocs.c.resource_provider_id == rp.c.id)
consumer_join = sa.join(rp_join, consumer,
allocs.c.consumer_id == consumer.c.uuid)
project_join = sa.join(consumer_join, project,
consumer.c.project_id == project.c.id)
user_join = sa.join(project_join, user,
consumer.c.user_id == user.c.id)
sel = sa.select(cols).select_from(user_join)
sel = sel.where(allocs.c.consumer_id == consumer_uuid)
return [dict(r) for r in ctx.session.execute(sel)]
@db_api.placement_context_manager.writer.independent
def _create_incomplete_consumers_for_provider(ctx, rp_id):
# TODO(jaypipes): Remove in Stein after a blocker migration is added.
"""Creates consumer record if consumer relationship between allocations ->
consumers table is missing for any allocation on the supplied provider
internal ID, using the "incomplete consumer" project and user CONF options.
"""
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, consumer_obj.CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
sel = sa.select([_ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
missing = ctx.session.execute(sel).fetchall()
if missing:
# Do a single INSERT for all missing consumer relationships for the
# provider
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
cols = [
_ALLOC_TBL.c.consumer_id,
incomplete_proj_id,
incomplete_user_id,
]
sel = sa.select(cols)
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.resource_provider_id == rp_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
# NOTE(mnaser): It is possible to have multiple consumers having many
# allocations to the same resource provider, which would
# make the INSERT FROM SELECT fail due to duplicates.
sel = sel.group_by(_ALLOC_TBL.c.consumer_id)
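        # The statement built here is roughly (illustrative SQL):
        #   INSERT INTO consumers (uuid, project_id, user_id)
        #   SELECT a.consumer_id, <incomplete project id>, <incomplete user id>
        #   FROM allocations a
        #   LEFT JOIN consumers c ON a.consumer_id = c.uuid
        #   WHERE a.resource_provider_id = <rp_id> AND c.id IS NULL
        #   GROUP BY a.consumer_id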
target_cols = ['uuid', 'project_id', 'user_id']
ins_stmt = consumer_obj.CONSUMER_TBL.insert().from_select(
target_cols, sel)
res = ctx.session.execute(ins_stmt)
if res.rowcount > 0:
LOG.info("Online data migration to fix incomplete consumers "
"for resource provider %s has been run. Migrated %d "
"incomplete consumer records on the fly.", rp_id,
res.rowcount)
@db_api.placement_context_manager.writer.independent
def _create_incomplete_consumer(ctx, consumer_id):
# TODO(jaypipes): Remove in Stein after a blocker migration is added.
"""Creates consumer record if consumer relationship between allocations ->
consumers table is missing for the supplied consumer UUID, using the
"incomplete consumer" project and user CONF options.
"""
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, consumer_obj.CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
sel = sa.select([_ALLOC_TBL.c.consumer_id])
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(
sa.and_(
_ALLOC_TBL.c.consumer_id == consumer_id,
consumer_obj.CONSUMER_TBL.c.id.is_(None)))
missing = ctx.session.execute(sel).fetchall()
if missing:
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
ins_stmt = consumer_obj.CONSUMER_TBL.insert().values(
uuid=consumer_id, project_id=incomplete_proj_id,
user_id=incomplete_user_id)
res = ctx.session.execute(ins_stmt)
if res.rowcount > 0:
LOG.info("Online data migration to fix incomplete consumers "
"for consumer %s has been run. Migrated %d incomplete "
"consumer records on the fly.", consumer_id, res.rowcount)
@base.VersionedObjectRegistry.register_if(False)
class AllocationList(base.ObjectListBase, base.VersionedObject):
    # The number of times to retry _set_allocations if there has
    # been a resource provider (not consumer) generation conflict.
RP_CONFLICT_RETRY_COUNT = 10
fields = {
'objects': fields.ListOfObjectsField('Allocation'),
}
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@db_api.placement_context_manager.writer
def _set_allocations(self, context, allocs):
"""Write a set of allocations.
We must check that there is capacity for each allocation.
If there is not we roll back the entire set.
:raises `exception.ResourceClassNotFound` if any resource class in any
allocation in allocs cannot be found in either the standard
classes or the DB.
:raises `exception.InvalidAllocationCapacityExceeded` if any inventory
would be exhausted by the allocation.
        :raises `exception.InvalidAllocationConstraintsViolated` if any of
                the `step_size`, `min_unit` or `max_unit` constraints in an
                inventory will be violated by any one of the allocations.
        :raises `exception.ConcurrentUpdateDetected` if a generation for a
                resource provider or consumer failed its increment check.
"""
# First delete any existing allocations for any consumers. This
# provides a clean slate for the consumers mentioned in the list of
# allocations being manipulated.
consumer_ids = set(alloc.consumer.uuid for alloc in allocs)
for consumer_id in consumer_ids:
_delete_allocations_for_consumer(context, consumer_id)
# Before writing any allocation records, we check that the submitted
# allocations do not cause any inventory capacity to be exceeded for
# any resource provider and resource class involved in the allocation
# transaction. _check_capacity_exceeded() raises an exception if any
        # inventory capacity is exceeded. If capacity is not exceeded, the
# function returns a list of ResourceProvider objects containing the
# generation of the resource provider at the time of the check. These
# objects are used at the end of the allocation transaction as a guard
# against concurrent updates.
#
# Don't check capacity when alloc.used is zero. Zero is not a valid
# amount when making an allocation (the minimum consumption of a
# resource is one) but is used in this method to indicate a need for
# removal. Providing 0 is controlled at the HTTP API layer where PUT
# /allocations does not allow empty allocations. When POST /allocations
        # is implemented it will allow zero for the special case of atomically
        # setting and removing different allocations in the same request.
        # _check_capacity_exceeded will raise a ResourceClassNotFound if any
        # allocation is using a resource class that does not exist.
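        # To illustrate (schematic, hypothetical values): an allocs list of
        #   Allocation(resource_provider=rp1, resource_class='VCPU', used=2)
        #   Allocation(resource_provider=rp1, resource_class='MEMORY_MB',
        #              used=1024)
        #   Allocation(resource_provider=rp2, resource_class='DISK_GB', used=0)
        # re-creates two rows for the consumer against rp1, while the zero
        # for rp2 re-creates nothing there; the consumer's previous
        # allocations were already deleted above.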
visited_consumers = {}
visited_rps = _check_capacity_exceeded(context, allocs)
for alloc in allocs:
if alloc.consumer.id not in visited_consumers:
visited_consumers[alloc.consumer.id] = alloc.consumer
# If alloc.used is set to zero that is a signal that we don't want
# to (re-)create any allocations for this resource class.
            # _delete_allocations_for_consumer() above has already wiped out
            # allocations so just continue
if alloc.used == 0:
continue
consumer_id = alloc.consumer.uuid
rp = alloc.resource_provider
rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
ins_stmt = _ALLOC_TBL.insert().values(
resource_provider_id=rp.id,
resource_class_id=rc_id,
consumer_id=consumer_id,
used=alloc.used)
res = context.session.execute(ins_stmt)
alloc.id = res.lastrowid
alloc.obj_reset_changes()
# Generation checking happens here. If the inventory for this resource
# provider changed out from under us, this will raise a
# ConcurrentUpdateDetected which can be caught by the caller to choose
        # to try again. It will also roll back the transaction so that these
# changes always happen atomically.
for rp in visited_rps.values():
rp.generation = _increment_provider_generation(context, rp)
for consumer in visited_consumers.values():
consumer.increment_generation()
# If any consumers involved in this transaction ended up having no
# allocations, delete the consumer records. Exclude consumers that had
        # *some resource* in the allocation list with used > 0 since clearly
# those consumers have allocations...
cons_with_allocs = set(a.consumer.uuid for a in allocs if a.used > 0)
all_cons = set(c.uuid for c in visited_consumers.values())
consumers_to_check = all_cons - cons_with_allocs
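        # For example (hypothetical): a request that submits only used=0
        # entries for a consumer leaves that consumer in consumers_to_check,
        # and its now allocation-less consumer record is deleted just below.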
consumer_obj.delete_consumers_if_no_allocations(
context, consumers_to_check)
@classmethod
def get_all_by_resource_provider(cls, context, rp):
_create_incomplete_consumers_for_provider(context, rp.id)
db_allocs = _get_allocations_by_provider_id(context, rp.id)
# Build up a list of Allocation objects, setting the Allocation object
# fields to the same-named database record field we got from
# _get_allocations_by_provider_id(). We already have the
# ResourceProvider object so we just pass that object to the Allocation
# object constructor as-is
objs = []
for rec in db_allocs:
consumer = consumer_obj.Consumer(
context, id=rec['consumer_id'],
uuid=rec['consumer_uuid'],
generation=rec['consumer_generation'],
project=project_obj.Project(
context, id=rec['project_id'],
external_id=rec['project_external_id']),
user=user_obj.User(
context, id=rec['user_id'],
external_id=rec['user_external_id']))
objs.append(
Allocation(
context, id=rec['id'], resource_provider=rp,
resource_class=_RC_CACHE.string_from_id(
rec['resource_class_id']),
consumer=consumer,
used=rec['used']))
alloc_list = cls(context, objects=objs)
return alloc_list
@classmethod
def get_all_by_consumer_id(cls, context, consumer_id):
_create_incomplete_consumer(context, consumer_id)
db_allocs = _get_allocations_by_consumer_uuid(context, consumer_id)
if db_allocs:
# Build up the Consumer object (it's the same for all allocations
# since we looked up by consumer ID)
db_first = db_allocs[0]
consumer = consumer_obj.Consumer(
context, id=db_first['consumer_id'],
uuid=db_first['consumer_uuid'],
generation=db_first['consumer_generation'],
project=project_obj.Project(
context, id=db_first['project_id'],
external_id=db_first['project_external_id']),
user=user_obj.User(
context, id=db_first['user_id'],
external_id=db_first['user_external_id']))
# Build up a list of Allocation objects, setting the Allocation object
# fields to the same-named database record field we got from
        # _get_allocations_by_consumer_uuid().
#
# NOTE(jaypipes): Unlike with get_all_by_resource_provider(), we do
# NOT already have the ResourceProvider object so we construct a new
# ResourceProvider object below by looking at the resource provider
        # fields returned by _get_allocations_by_consumer_uuid().
objs = [
Allocation(
context, id=rec['id'],
resource_provider=ResourceProvider(
context,
id=rec['resource_provider_id'],
uuid=rec['resource_provider_uuid'],
name=rec['resource_provider_name'],
generation=rec['resource_provider_generation']),
resource_class=_RC_CACHE.string_from_id(
rec['resource_class_id']),
consumer=consumer,
used=rec['used'])
for rec in db_allocs
]
alloc_list = cls(context, objects=objs)
return alloc_list
def replace_all(self):
"""Replace the supplied allocations.
:note: This method always deletes all allocations for all consumers
referenced in the list of Allocation objects and then replaces
the consumer's allocations with the Allocation objects. In doing
so, it will end up setting the Allocation.id attribute of each
Allocation object.
"""
# Retry _set_allocations server side if there is a
# ResourceProviderConcurrentUpdateDetected. We don't care about
# sleeping, we simply want to reset the resource provider objects
# and try again. For sake of simplicity (and because we don't have
# easy access to the information) we reload all the resource
# providers that may be present.
retries = self.RP_CONFLICT_RETRY_COUNT
while retries:
retries -= 1
try:
self._set_allocations(self._context, self.objects)
break
except exception.ResourceProviderConcurrentUpdateDetected:
LOG.debug('Retrying allocations write on resource provider '
'generation conflict')
# We only want to reload each unique resource provider once.
alloc_rp_uuids = set(
alloc.resource_provider.uuid for alloc in self.objects)
seen_rps = {}
for rp_uuid in alloc_rp_uuids:
seen_rps[rp_uuid] = ResourceProvider.get_by_uuid(
self._context, rp_uuid)
for alloc in self.objects:
rp_uuid = alloc.resource_provider.uuid
alloc.resource_provider = seen_rps[rp_uuid]
else:
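            # NOTE: this else clause belongs to the while loop above; it only
            # runs when the retries are exhausted without a break.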
# We ran out of retries so we need to raise again.
# The log will automatically have request id info associated with
# it that will allow tracing back to specific allocations.
# Attempting to extract specific consumer or resource provider
# information from the allocations is not coherent as this
# could be multiple consumers and providers.
LOG.warning('Exceeded retry limit of %d on allocations write',
self.RP_CONFLICT_RETRY_COUNT)
raise exception.ResourceProviderConcurrentUpdateDetected()
def delete_all(self):
consumer_uuids = set(alloc.consumer.uuid for alloc in self.objects)
alloc_ids = [alloc.id for alloc in self.objects]
_delete_allocations_by_ids(self._context, alloc_ids)
consumer_obj.delete_consumers_if_no_allocations(
self._context, consumer_uuids)
def __repr__(self):
strings = [repr(x) for x in self.objects]
return "AllocationList[" + ", ".join(strings) + "]"
@base.VersionedObjectRegistry.register_if(False)
class Usage(base.VersionedObject):
fields = {
'resource_class': rc_fields.ResourceClassField(read_only=True),
'usage': fields.NonNegativeIntegerField(),
}
@staticmethod
def _from_db_object(context, target, source):
for field in target.fields:
            if field != 'resource_class':
setattr(target, field, source[field])
if 'resource_class' not in target:
rc_str = _RC_CACHE.string_from_id(source['resource_class_id'])
target.resource_class = rc_str
target._context = context
target.obj_reset_changes()
return target
@base.VersionedObjectRegistry.register_if(False)
class UsageList(base.ObjectListBase, base.VersionedObject):
fields = {
'objects': fields.ListOfObjectsField('Usage'),
}
@staticmethod
@db_api.placement_context_manager.reader