Delete the placement code

This finalizes the removal of the placement code from nova.
This change primarily removes code and makes fixes to cmd,
test and migration tooling to adapt to the removal.

Placement tests and documentation were already removed in
earlier patches.

The database migration in nova-manage that calls
consumer_obj.create_incomplete_consumers has been removed.
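
As a rough, hypothetical sketch of the shape of what was removed
(the wrapper name, arguments and return contract below are
assumptions, not the actual nova-manage code):

# Hypothetical sketch of a nova-manage online data migration hook
# delegating to the placement consumer object; the real wiring differed.
from nova.api.openstack.placement.objects import consumer as consumer_obj

def _create_incomplete_consumers(ctxt, batch_size):
    # Backfill placeholder project/user records for consumers created
    # before those records existed, one batch at a time. The migration
    # runner is assumed to expect a pair of counts back.
    return consumer_obj.create_incomplete_consumers(ctxt, batch_size)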

A functional test which confirms the default incomplete
consumer user and project id has been changed so that its use of
conf.placement.incomplete_* (now removed) is replaced with a
constant. The placement server, running in the functional
test, provides its own config.
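
A minimal sketch of that kind of change (the constant value and
helper below are illustrative assumptions, not the actual patch):

# Hypothetical before/after sketch for the functional test.
# Before, the expectation came from nova-side config, now removed:
#     expected = CONF.placement.incomplete_consumer_project_id
# After, a module-level constant stands in; the placement server that
# runs inside the functional test carries its own configuration.
INCOMPLETE_CONSUMER_ID = '00000000-0000-0000-0000-000000000000'

def expected_incomplete_consumer_ids():
    """Project and user id expected for an incomplete consumer."""
    return INCOMPLETE_CONSUMER_ID, INCOMPLETE_CONSUMER_ID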

placement-related configuration is updated to only register those
opts which are relevant on the nova side. This mostly means
ksa (keystoneauth) related opts. placement-database configuration
is removed from nova/conf/database.
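
For flavour, a minimal sketch of registering only the client-side
(ksa) opts for the [placement] group; the group name is real, but the
helper layout shown here is an assumption, not the actual module:

# Illustrative sketch only; the real nova conf module registers further
# nova-side options beyond the keystoneauth ones shown here.
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg

placement_group = cfg.OptGroup('placement', title='Placement Service Options')

def register_opts(conf):
    conf.register_group(placement_group)
    # Only what nova needs to reach placement as a client: session,
    # auth and adapter (endpoint selection) options.
    ks_loading.register_session_conf_options(conf, placement_group.name)
    ks_loading.register_auth_conf_options(conf, placement_group.name)
    ks_loading.register_adapter_conf_options(conf, placement_group.name)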

tox.ini is updated to remove the group_regex required by the
placement gabbi tests. This should probably have gone when the
placement functional tests went, but was overlooked.

A release note is added which describes this as cleanup (the
main action already happened) and points people to the nova to
placement upgrade instructions in case they have not done that
upgrade yet.

Change-Id: I4181f39dea7eb10b84e6f5057938767b3e422aff
Author: Chris Dent
Date: 2019-01-31 10:50:10 +00:00
parent 760fc2de32
commit 70a2879b2c
73 changed files with 30 additions and 11933 deletions

.gitignore

@@ -47,7 +47,6 @@ nova/vcsversion.py
tools/conf/nova.conf*
doc/source/_static/nova.conf.sample
doc/source/_static/nova.policy.yaml.sample
doc/source/_static/placement.policy.yaml.sample
# Files created by releasenotes build
releasenotes/build


@@ -55,7 +55,6 @@ sample_config_basename = '_static/nova'
policy_generator_config_file = [
('../../etc/nova/nova-policy-generator.conf', '_static/nova'),
('../../etc/nova/placement-policy-generator.conf', '_static/placement')
]
actdiag_html_image_format = 'SVG'


@@ -1,102 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystonemiddleware import auth_token
from oslo_log import log as logging
from oslo_middleware import request_id
import webob.dec
import webob.exc
from nova.api.openstack.placement import context
LOG = logging.getLogger(__name__)
class Middleware(object):
def __init__(self, application, **kwargs):
self.application = application
# NOTE(cdent): Only to be used in tests where auth is being faked.
class NoAuthMiddleware(Middleware):
"""Require a token if one isn't present."""
def __init__(self, application):
self.application = application
@webob.dec.wsgify
def __call__(self, req):
if req.environ['PATH_INFO'] == '/':
return self.application
if 'X-Auth-Token' not in req.headers:
return webob.exc.HTTPUnauthorized()
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
if user_id == 'admin':
roles = ['admin']
else:
roles = []
req.headers['X_USER_ID'] = user_id
req.headers['X_TENANT_ID'] = project_id
req.headers['X_ROLES'] = ','.join(roles)
return self.application
class PlacementKeystoneContext(Middleware):
"""Make a request context from keystone headers."""
@webob.dec.wsgify
def __call__(self, req):
req_id = req.environ.get(request_id.ENV_REQUEST_ID)
ctx = context.RequestContext.from_environ(
req.environ, request_id=req_id)
if ctx.user_id is None and req.environ['PATH_INFO'] != '/':
LOG.debug("Neither X_USER_ID nor X_USER found in request")
return webob.exc.HTTPUnauthorized()
req.environ['placement.context'] = ctx
return self.application
class PlacementAuthProtocol(auth_token.AuthProtocol):
"""A wrapper on Keystone auth_token middleware.
Does not perform verification of authentication tokens
for root in the API.
"""
def __init__(self, app, conf):
self._placement_app = app
super(PlacementAuthProtocol, self).__init__(app, conf)
def __call__(self, environ, start_response):
if environ['PATH_INFO'] == '/':
return self._placement_app(environ, start_response)
return super(PlacementAuthProtocol, self).__call__(
environ, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return PlacementAuthProtocol(app, conf)
return auth_filter


@@ -1,52 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import policy
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext):
def can(self, action, target=None, fatal=True):
"""Verifies that the given action is valid on the target in this
context.
:param action: string representing the action to be checked.
:param target: As much information about the object being operated on
as possible. The target argument should be a dict instance or an
instance of a class that fully supports the Mapping abstract base
class and deep copying. For object creation this should be a
dictionary representing the location of the object e.g.
``{'project_id': context.project_id}``. If None, then this default
target will be considered::
{'project_id': self.project_id, 'user_id': self.user_id}
:param fatal: if False, will return False when an
exception.PolicyNotAuthorized occurs.
:raises nova.api.openstack.placement.exception.PolicyNotAuthorized:
if verification fails and fatal is True.
:return: returns a non-False value (not necessarily "True") if
authorized and False if not authorized and fatal is False.
"""
if target is None:
target = {'project_id': self.project_id,
'user_id': self.user_id}
try:
return policy.authorize(self, action, target)
except exception.PolicyNotAuthorized:
if fatal:
raise
return False


@@ -1,48 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database context manager for placement database connection, kept in its
own file so the nova db_api (which has cascading imports) is not imported.
"""
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from nova.utils import run_once
placement_context_manager = enginefacade.transaction_context()
LOG = logging.getLogger(__name__)
def _get_db_conf(conf_group):
return dict(conf_group.items())
@run_once("TransactionFactory already started, not reconfiguring.",
LOG.warning)
def configure(conf):
# If [placement_database]/connection is not set in conf, then placement
# data will be stored in the nova_api database.
if conf.placement_database.connection is None:
placement_context_manager.configure(
**_get_db_conf(conf.api_database))
else:
placement_context_manager.configure(
**_get_db_conf(conf.placement_database))
def get_placement_engine():
return placement_context_manager.writer.get_engine()
@enginefacade.transaction_context_provider
class DbContext(object):
"""Stub class for db session handling outside of web requests."""


@@ -1,120 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Deployment handling for Placmenent API."""
from microversion_parse import middleware as mp_middleware
import oslo_middleware
from oslo_middleware import cors
from nova.api.openstack.placement import auth
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import fault_wrap
from nova.api.openstack.placement import handler
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider
from nova.api.openstack.placement import requestlog
from nova.api.openstack.placement import util
# TODO(cdent): NAME points to the config project being used, so for
# now this is "nova" but we probably want "placement" eventually.
NAME = "nova"
def deploy(conf):
"""Assemble the middleware pipeline leading to the placement app."""
if conf.api.auth_strategy == 'noauth2':
auth_middleware = auth.NoAuthMiddleware
else:
# Do not use 'oslo_config_project' param here as the conf
# location may have been overridden earlier in the deployment
# process with OS_PLACEMENT_CONFIG_DIR in wsgi.py.
auth_middleware = auth.filter_factory(
{}, oslo_config_config=conf)
# Pass in our CORS config, if any, manually as that's a)
# explicit, b) makes testing more straightforward, c) lets
# us control the use of cors by the presence of its config.
conf.register_opts(cors.CORS_OPTS, 'cors')
if conf.cors.allowed_origin:
cors_middleware = oslo_middleware.CORS.factory(
{}, **conf.cors)
else:
cors_middleware = None
context_middleware = auth.PlacementKeystoneContext
req_id_middleware = oslo_middleware.RequestId
microversion_middleware = mp_middleware.MicroversionMiddleware
fault_middleware = fault_wrap.FaultWrapper
request_log = requestlog.RequestLog
application = handler.PlacementHandler()
# configure microversion middleware in the old school way
application = microversion_middleware(
application, microversion.SERVICE_TYPE, microversion.VERSIONS,
json_error_formatter=util.json_error_formatter)
# NOTE(cdent): The ordering here is important. The list is ordered
# from the inside out. For a single request req_id_middleware is called
# first and microversion_middleware last. Then the request is finally
# passed to the application (the PlacementHandler). At that point
# the response ascends the middleware in the reverse of the
# order the request went in. This order ensures that log messages
# all see the same contextual information including request id and
# authentication information.
for middleware in (fault_middleware,
request_log,
context_middleware,
auth_middleware,
cors_middleware,
req_id_middleware,
):
if middleware:
application = middleware(application)
# NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
if not conf.oslo_policy.enforce_scope:
import warnings
warnings.filterwarnings('ignore',
message="Policy .* failed scope check",
category=UserWarning)
return application
def update_database():
"""Do any database updates required at process boot time, such as
updating the traits table.
"""
ctx = db_api.DbContext()
resource_provider.ensure_trait_sync(ctx)
resource_provider.ensure_rc_cache(ctx)
# NOTE(cdent): Although project_name is no longer used because of the
# resolution of https://bugs.launchpad.net/nova/+bug/1734491, loadapp()
# is considered a public interface for the creation of a placement
# WSGI app so must maintain its interface. The canonical placement WSGI
# app is created by init_application in wsgi.py, but this is not
# required and in fact can be limiting. loadapp() may be used from
# fixtures or arbitrary WSGI frameworks and loaders.
def loadapp(config, project_name=NAME):
"""WSGI application creator for placement.
:param config: An oslo_config.cfg.ConfigOpts containing placement
configuration.
:param project_name: oslo_config project name. Ignored, preserved for
backwards compatibility
"""
application = deploy(config)
update_database()
return application
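
Since loadapp() is the stated public interface, a minimal sketch of
driving it from outside wsgi.py (option registration is assumed to have
happened elsewhere; the wsgiref server is only for illustration):

# Illustrative sketch of using loadapp() from an arbitrary WSGI loader.
from oslo_config import cfg
from wsgiref import simple_server

from nova.api.openstack.placement import deploy

conf = cfg.CONF  # assumed to already carry the placement configuration
application = deploy.loadapp(conf)
simple_server.make_server('127.0.0.1', 8080, application).serve_forever()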


@@ -1,94 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Call any URI in the placement service directly without real HTTP.
This is useful for those cases where processes wish to manipulate the
Placement datastore but do not want to run Placement as a long running
service. A PlacementDirect context manager is provided. Within that
HTTP requests may be made as normal but they will not actually traverse
a real socket.
"""
from keystoneauth1 import adapter
from keystoneauth1 import session
import mock
from oslo_utils import uuidutils
import requests
from wsgi_intercept import interceptor
from nova.api.openstack.placement import deploy
class PlacementDirect(interceptor.RequestsInterceptor):
"""Provide access to the placement service without real HTTP.
wsgi-intercept is used to provide a keystoneauth1 Adapter that has access
to an in-process placement service. This provides access to making changes
to the placement database without requiring HTTP over the network - it
remains in-process.
Authentication to the service is turned off; admin access is assumed.
Access is provided via a context manager which is responsible for
turning the wsgi-intercept on and off, and setting and removing
mocks required for keystoneauth1 to work around endpoint discovery.
Example::
with PlacementDirect(cfg.CONF, latest_microversion=True) as client:
allocations = client.get('/allocations/%s' % consumer)
:param conf: An oslo config with the options used to configure
the placement service (notably database connection
string).
:param latest_microversion: If True, API requests will use the latest
microversion if not otherwise specified. If
False (the default), the base microversion is
the default.
"""
def __init__(self, conf, latest_microversion=False):
conf.set_override('auth_strategy', 'noauth2', group='api')
app = lambda: deploy.loadapp(conf)
self.url = 'http://%s/placement' % str(uuidutils.generate_uuid())
# Supply our own session so the wsgi-intercept can intercept
# the right thing.
request_session = requests.Session()
headers = {
'x-auth-token': 'admin',
}
# TODO(efried): See below
if latest_microversion:
headers['OpenStack-API-Version'] = 'placement latest'
self.adapter = adapter.Adapter(
session.Session(auth=None, session=request_session,
additional_headers=headers),
service_type='placement', raise_exc=False)
# TODO(efried): Figure out why this isn't working:
# default_microversion='latest' if latest_microversion else None)
self._mocked_endpoint = mock.patch(
'keystoneauth1.session.Session.get_endpoint',
new=mock.Mock(return_value=self.url))
super(PlacementDirect, self).__init__(app, url=self.url)
def __enter__(self):
"""Start the wsgi-intercept interceptor and keystone endpoint mock.
A no auth ksa Adapter is provided to the context being managed.
"""
super(PlacementDirect, self).__enter__()
self._mocked_endpoint.start()
return self.adapter
def __exit__(self, *exc):
self._mocked_endpoint.stop()
return super(PlacementDirect, self).__exit__(*exc)


@@ -1,48 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Error code symbols to be used in structured JSON error responses.
These are strings to be used in the 'code' attribute, as described by
the API guideline on `errors`_.
There must be only one instance of any string value and it should have
only one associated constant SYMBOL.
In a WSGI handler (representing the sole handler for an HTTP method and
URI) each error condition should get a separate error code. Reusing an
error code in a different handler is not just acceptable, but useful.
For example 'placement.inventory.inuse' is meaningful and correct in both
``PUT /resource_providers/{uuid}/inventories`` and ``DELETE`` on the same
URI.
.. _errors: http://specs.openstack.org/openstack/api-wg/guidelines/errors.html
"""
# NOTE(cdent): This is the simplest thing that can possibly work, for now.
# If it turns out we want to automate this, or put different resources in
# different files, or otherwise change things, that's fine. The only thing
# that needs to be maintained as the same are the strings that API end
# users use. How they are created is completely fungible.
# Do not change the string values. Once set, they are set.
# Do not reuse string values. There should be only one symbol for any
# value.
DEFAULT = 'placement.undefined_code'
INVENTORY_INUSE = 'placement.inventory.inuse'
CONCURRENT_UPDATE = 'placement.concurrent_update'
DUPLICATE_NAME = 'placement.duplicate_name'
PROVIDER_IN_USE = 'placement.resource_provider.inuse'
PROVIDER_CANNOT_DELETE_PARENT = (
'placement.resource_provider.cannot_delete_parent')
RESOURCE_PROVIDER_NOT_FOUND = 'placement.resource_provider.not_found'
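
These symbols reach API users through the comment argument of webob
exceptions, as the handlers later in this diff do; a small sketch of
that pattern:

# Sketch mirroring how handlers elsewhere in this diff attach a code.
from webob import exc

from nova.api.openstack.placement import errors

def conflict_response(reason):
    # The error formatting chain (wsgi_wrapper and util.json_error_formatter,
    # neither shown in this excerpt) surfaces the comment as the 'code'
    # attribute of the structured JSON error body.
    return exc.HTTPConflict('Update conflict: %s' % reason,
                            comment=errors.CONCURRENT_UPDATE)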


@@ -1,207 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Exceptions for use in the Placement API."""
# NOTE(cdent): The exceptions are copied from nova.exception, where they
# were originally used. To prepare for extracting placement to its own
# repository we wish to no longer do that. Instead, exceptions used by
# placement should be in the placement hierarchy.
from oslo_log import log as logging
from nova.i18n import _
LOG = logging.getLogger(__name__)
class _BaseException(Exception):
"""Base Exception
To correctly use this class, inherit from it and define
a 'msg_fmt' property. That msg_fmt will get printf'd
with the keyword arguments provided to the constructor.
"""
msg_fmt = _("An unknown exception occurred.")
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if not message:
try:
message = self.msg_fmt % kwargs
except Exception:
# NOTE(melwitt): This is done in a separate method so it can be
# monkey-patched during testing to make it a hard failure.
self._log_exception()
message = self.msg_fmt
self.message = message
super(_BaseException, self).__init__(message)
def _log_exception(self):
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in self.kwargs.items():
LOG.error("%s: %s" % (name, value)) # noqa
def format_message(self):
# Use the first argument to the python Exception object which
# should be our full exception message, (see __init__).
return self.args[0]
class NotFound(_BaseException):
msg_fmt = _("Resource could not be found.")
class Exists(_BaseException):
msg_fmt = _("Resource already exists.")
class InvalidInventory(_BaseException):
msg_fmt = _("Inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s' invalid.")
class CannotDeleteParentResourceProvider(_BaseException):
msg_fmt = _("Cannot delete resource provider that is a parent of "
"another. Delete child providers first.")
class ConcurrentUpdateDetected(_BaseException):
msg_fmt = _("Another thread concurrently updated the data. "
"Please retry your update")
class ResourceProviderConcurrentUpdateDetected(ConcurrentUpdateDetected):
msg_fmt = _("Another thread concurrently updated the resource provider "
"data. Please retry your update")
class InvalidAllocationCapacityExceeded(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would exceed the capacity.")
class InvalidAllocationConstraintsViolated(InvalidInventory):
msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. The requested "
"amount would violate inventory constraints.")
class InvalidInventoryCapacity(InvalidInventory):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than or equal to total.")
class InvalidInventoryCapacityReservedCanBeTotal(InvalidInventoryCapacity):
msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
"resource provider '%(resource_provider)s'. "
"The reserved value is greater than total.")
# An exception with this name is used on both sides of the placement/
# nova interaction.
class InventoryInUse(InvalidInventory):
# NOTE(mriedem): This message cannot change without impacting the
# nova.scheduler.client.report._RE_INV_IN_USE regex.
msg_fmt = _("Inventory for '%(resource_classes)s' on "
"resource provider '%(resource_provider)s' in use.")
class InventoryWithResourceClassNotFound(NotFound):
msg_fmt = _("No inventory of class %(resource_class)s found.")
class MaxDBRetriesExceeded(_BaseException):
msg_fmt = _("Max retries of DB transaction exceeded attempting to "
"perform %(action)s.")
class ObjectActionError(_BaseException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class PolicyNotAuthorized(_BaseException):
msg_fmt = _("Policy does not allow %(action)s to be performed.")
class ResourceClassCannotDeleteStandard(_BaseException):
msg_fmt = _("Cannot delete standard resource class %(resource_class)s.")
class ResourceClassCannotUpdateStandard(_BaseException):
msg_fmt = _("Cannot update standard resource class %(resource_class)s.")
class ResourceClassExists(_BaseException):
msg_fmt = _("Resource class %(resource_class)s already exists.")
class ResourceClassInUse(_BaseException):
msg_fmt = _("Cannot delete resource class %(resource_class)s. "
"Class is in use in inventory.")
class ResourceClassNotFound(NotFound):
msg_fmt = _("No such resource class %(resource_class)s.")
# An exception with this name is used on both sides of the placement/
# nova interaction.
class ResourceProviderInUse(_BaseException):
msg_fmt = _("Resource provider has allocations.")
class TraitCannotDeleteStandard(_BaseException):
msg_fmt = _("Cannot delete standard trait %(name)s.")
class TraitExists(_BaseException):
msg_fmt = _("The Trait %(name)s already exists")
class TraitInUse(_BaseException):
msg_fmt = _("The trait %(name)s is in use by a resource provider.")
class TraitNotFound(NotFound):
msg_fmt = _("No such trait(s): %(names)s.")
class ProjectNotFound(NotFound):
msg_fmt = _("No such project(s): %(external_id)s.")
class ProjectExists(Exists):
msg_fmt = _("The project %(external_id)s already exists.")
class UserNotFound(NotFound):
msg_fmt = _("No such user(s): %(external_id)s.")
class UserExists(Exists):
msg_fmt = _("The user %(external_id)s already exists.")
class ConsumerNotFound(NotFound):
msg_fmt = _("No such consumer(s): %(uuid)s.")
class ConsumerExists(Exists):
msg_fmt = _("The consumer %(uuid)s already exists.")


@@ -1,48 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple middleware for safely catching unexpected exceptions."""
# NOTE(cdent): This is a super simplified replacement for the nova
# FaultWrapper, which does more than placement needs.
from oslo_log import log as logging
import six
from webob import exc
from nova.api.openstack.placement import util
LOG = logging.getLogger(__name__)
class FaultWrapper(object):
"""Turn an uncaught exception into a status 500.
Uncaught exceptions usually shouldn't happen, if it does it
means there is a bug in the placement service, which should be
fixed.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
try:
return self.application(environ, start_response)
except Exception as unexpected_exception:
LOG.exception('Placement API unexpected error: %s',
unexpected_exception)
formatted_exception = exc.HTTPInternalServerError(
six.text_type(unexpected_exception))
formatted_exception.json_formatter = util.json_error_formatter
return formatted_exception.generate_response(
environ, start_response)


@@ -1,231 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handlers for placement API.
Individual handlers are associated with URL paths in the
ROUTE_DECLARATIONS dictionary. At the top level each key is a Routes
compliant path. The value of that key is a dictionary mapping
individual HTTP request methods to a Python function representing a
simple WSGI application for satisfying that request.
The ``make_map`` method processes ROUTE_DECLARATIONS to create a
Routes.Mapper, including automatic handlers to respond with a
405 when a request is made against a valid URL with an invalid
method.
"""
import routes
import webob
from oslo_log import log as logging
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.handlers import aggregate
from nova.api.openstack.placement.handlers import allocation
from nova.api.openstack.placement.handlers import allocation_candidate
from nova.api.openstack.placement.handlers import inventory
from nova.api.openstack.placement.handlers import reshaper
from nova.api.openstack.placement.handlers import resource_class
from nova.api.openstack.placement.handlers import resource_provider
from nova.api.openstack.placement.handlers import root
from nova.api.openstack.placement.handlers import trait
from nova.api.openstack.placement.handlers import usage
from nova.api.openstack.placement import util
from nova.i18n import _
LOG = logging.getLogger(__name__)
# URLs and Handlers
# NOTE(cdent): When adding URLs here, do not use regex patterns in
# the path parameters (e.g. {uuid:[0-9a-zA-Z-]+}) as that will lead
# to 404s that are controlled outside of the individual resources
# and thus do not include specific information on the why of the 404.
ROUTE_DECLARATIONS = {
'/': {
'GET': root.home,
},
# NOTE(cdent): This allows '/placement/' and '/placement' to
# both work as the root of the service, which we probably want
# for those situations where the service is mounted under a
# prefix (as it is in devstack). While weird, an empty string is
# a legit key in a dictionary and matches as desired in Routes.
'': {
'GET': root.home,
},
'/resource_classes': {
'GET': resource_class.list_resource_classes,
'POST': resource_class.create_resource_class
},
'/resource_classes/{name}': {
'GET': resource_class.get_resource_class,
'PUT': resource_class.update_resource_class,
'DELETE': resource_class.delete_resource_class,
},
'/resource_providers': {
'GET': resource_provider.list_resource_providers,
'POST': resource_provider.create_resource_provider
},
'/resource_providers/{uuid}': {
'GET': resource_provider.get_resource_provider,
'DELETE': resource_provider.delete_resource_provider,
'PUT': resource_provider.update_resource_provider
},
'/resource_providers/{uuid}/inventories': {
'GET': inventory.get_inventories,
'POST': inventory.create_inventory,
'PUT': inventory.set_inventories,
'DELETE': inventory.delete_inventories
},
'/resource_providers/{uuid}/inventories/{resource_class}': {
'GET': inventory.get_inventory,
'PUT': inventory.update_inventory,
'DELETE': inventory.delete_inventory
},
'/resource_providers/{uuid}/usages': {
'GET': usage.list_usages
},
'/resource_providers/{uuid}/aggregates': {
'GET': aggregate.get_aggregates,
'PUT': aggregate.set_aggregates
},
'/resource_providers/{uuid}/allocations': {
'GET': allocation.list_for_resource_provider,
},
'/allocations': {
'POST': allocation.set_allocations,
},
'/allocations/{consumer_uuid}': {
'GET': allocation.list_for_consumer,
'PUT': allocation.set_allocations_for_consumer,
'DELETE': allocation.delete_allocations,
},
'/allocation_candidates': {
'GET': allocation_candidate.list_allocation_candidates,
},
'/traits': {
'GET': trait.list_traits,
},
'/traits/{name}': {
'GET': trait.get_trait,
'PUT': trait.put_trait,
'DELETE': trait.delete_trait,
},
'/resource_providers/{uuid}/traits': {
'GET': trait.list_traits_for_resource_provider,
'PUT': trait.update_traits_for_resource_provider,
'DELETE': trait.delete_traits_for_resource_provider
},
'/usages': {
'GET': usage.get_total_usages,
},
'/reshaper': {
'POST': reshaper.reshape,
},
}
def dispatch(environ, start_response, mapper):
"""Find a matching route for the current request.
If no match is found, raise a 404 response.
If there is a matching route, but no matching handler
for the given method, raise a 405.
"""
result = mapper.match(environ=environ)
if result is None:
raise webob.exc.HTTPNotFound(
json_formatter=util.json_error_formatter)
# We can't reach this code without action being present.
handler = result.pop('action')
environ['wsgiorg.routing_args'] = ((), result)
return handler(environ, start_response)
def handle_405(environ, start_response):
"""Return a 405 response when method is not allowed.
If _methods are in routing_args, send an allow header listing
the methods that are possible on the provided URL.
"""
_methods = util.wsgi_path_item(environ, '_methods')
headers = {}
if _methods:
# Ensure allow header is a python 2 or 3 native string (thus
# not unicode in python 2 but stay a string in python 3)
# In the process done by Routes to save the allowed methods
# to its routing table they become unicode in py2.
headers['allow'] = str(_methods)
# Use Exception class as WSGI Application. We don't want to raise here.
response = webob.exc.HTTPMethodNotAllowed(
_('The method specified is not allowed for this resource.'),
headers=headers, json_formatter=util.json_error_formatter)
return response(environ, start_response)
def make_map(declarations):
"""Process route declarations to create a Route Mapper."""
mapper = routes.Mapper()
for route, targets in declarations.items():
allowed_methods = []
for method in targets:
mapper.connect(route, action=targets[method],
conditions=dict(method=[method]))
allowed_methods.append(method)
allowed_methods = ', '.join(allowed_methods)
mapper.connect(route, action=handle_405, _methods=allowed_methods)
return mapper
class PlacementHandler(object):
"""Serve Placement API.
Dispatch to handlers defined in ROUTE_DECLARATIONS.
"""
def __init__(self, **local_config):
# NOTE(cdent): Local config currently unused.
self._map = make_map(ROUTE_DECLARATIONS)
def __call__(self, environ, start_response):
# Check that an incoming request with a content-length header
# that is an integer > 0 and not empty, also has a content-type
# header that is not empty. If not raise a 400.
clen = environ.get('CONTENT_LENGTH')
try:
if clen and (int(clen) > 0) and not environ.get('CONTENT_TYPE'):
raise webob.exc.HTTPBadRequest(
_('content-type header required when content-length > 0'),
json_formatter=util.json_error_formatter)
except ValueError as exc:
raise webob.exc.HTTPBadRequest(
_('content-length header must be an integer'),
json_formatter=util.json_error_formatter)
try:
return dispatch(environ, start_response, self._map)
# Trap the NotFound exceptions raised by the objects used
# with the API and transform them into webob.exc.HTTPNotFound.
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
exc, json_formatter=util.json_error_formatter)
except exception.PolicyNotAuthorized as exc:
raise webob.exc.HTTPForbidden(
exc.format_message(),
json_formatter=util.json_error_formatter)
# Remaining uncaught exceptions will rise first to the Microversion
# middleware, where any WebOb generated exceptions will be caught and
# transformed into legit HTTP error responses (with microversion
# headers added), and then to the FaultWrapper middleware which will
# catch anything else and transform them into 500 responses.
# NOTE(cdent): There should be very few uncaught exceptions which are
# not WebOb exceptions at this stage as the handlers are contained by
# the wsgify decorator which will transform those exceptions to
# responses itself.


@@ -1,133 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Aggregate handlers for Placement API."""
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import aggregate as policies
from nova.api.openstack.placement.schemas import aggregate as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
_INCLUDE_GENERATION_VERSION = (1, 19)
def _send_aggregates(req, resource_provider, aggregate_uuids):
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
response = req.response
response.status = 200
payload = _serialize_aggregates(aggregate_uuids)
if want_version.matches(min_version=_INCLUDE_GENERATION_VERSION):
payload['resource_provider_generation'] = resource_provider.generation
response.body = encodeutils.to_utf8(
jsonutils.dumps(payload))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
req.response.cache_control = 'no-cache'
# We never get an aggregate itself, we get the list of aggregates
# that are associated with a resource provider. We don't record the
# time when that association was made and the time when an aggregate
# uuid was created is not relevant, so here we punt and use utcnow.
req.response.last_modified = timeutils.utcnow(with_timezone=True)
return response
def _serialize_aggregates(aggregate_uuids):
return {'aggregates': aggregate_uuids}
def _set_aggregates(resource_provider, aggregate_uuids,
increment_generation=False):
"""Set aggregates for the resource provider.
If increment generation is true, the resource provider generation
will be incremented if possible. If that fails (because something
else incremented the generation in another thread), a
ConcurrentUpdateDetected will be raised.
"""
# NOTE(cdent): It's not clear what the DBDuplicateEntry handling
# is doing here, set_aggregates already handles that, but I'm leaving
# it here because it was already there.
try:
resource_provider.set_aggregates(
aggregate_uuids, increment_generation=increment_generation)
except exception.ConcurrentUpdateDetected as exc:
raise webob.exc.HTTPConflict(
_('Update conflict: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
except db_exc.DBDuplicateEntry as exc:
raise webob.exc.HTTPConflict(
_('Update conflict: %(error)s') % {'error': exc})
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
@microversion.version_handler('1.1')
def get_aggregates(req):
"""GET a list of aggregates associated with a resource provider.
If the resource provider does not exist return a 404.
On success return a 200 with an application/json body containing a
list of aggregate uuids.
"""
context = req.environ['placement.context']
context.can(policies.LIST)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
aggregate_uuids = resource_provider.get_aggregates()
return _send_aggregates(req, resource_provider, aggregate_uuids)
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
@microversion.version_handler('1.1')
def set_aggregates(req):
context = req.environ['placement.context']
context.can(policies.UPDATE)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
consider_generation = want_version.matches(
min_version=_INCLUDE_GENERATION_VERSION)
put_schema = schema.PUT_AGGREGATES_SCHEMA_V1_1
if consider_generation:
put_schema = schema.PUT_AGGREGATES_SCHEMA_V1_19
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
data = util.extract_json(req.body, put_schema)
if consider_generation:
# Check for generation conflict
rp_gen = data['resource_provider_generation']
if resource_provider.generation != rp_gen:
raise webob.exc.HTTPConflict(
_("Resource provider's generation already changed. Please "
"update the generation and try again."),
comment=errors.CONCURRENT_UPDATE)
aggregate_uuids = data['aggregates']
else:
aggregate_uuids = data
_set_aggregates(resource_provider, aggregate_uuids,
increment_generation=consider_generation)
return _send_aggregates(req, resource_provider, aggregate_uuids)


@@ -1,576 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for setting and deleting allocations."""
import collections
import uuid
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import allocation as policies
from nova.api.openstack.placement.schemas import allocation as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
LOG = logging.getLogger(__name__)
def _last_modified_from_allocations(allocations, want_version):
"""Given a set of allocation objects, returns the last modified timestamp.
"""
# NOTE(cdent): The last_modified for an allocation will always be
# based off the created_at column because allocations are only
# ever inserted, never updated.
last_modified = None
# Only calculate last-modified if we are using a microversion that
# supports it.
get_last_modified = want_version and want_version.matches((1, 15))
for allocation in allocations:
if get_last_modified:
last_modified = util.pick_last_modified(last_modified, allocation)
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return last_modified
def _serialize_allocations_for_consumer(allocations, want_version):
"""Turn a list of allocations into a dict by resource provider uuid.
{
'allocations': {
RP_UUID_1: {
'generation': GENERATION,
'resources': {
'DISK_GB': 4,
'VCPU': 2
}
},
RP_UUID_2: {
'generation': GENERATION,
'resources': {
'DISK_GB': 6,
'VCPU': 3
}
}
},
# project_id and user_id are added with microversion 1.12
'project_id': PROJECT_ID,
'user_id': USER_ID,
# Generation for consumer >= 1.28
'consumer_generation': 1
}
"""
allocation_data = collections.defaultdict(dict)
for allocation in allocations:
key = allocation.resource_provider.uuid
if 'resources' not in allocation_data[key]:
allocation_data[key]['resources'] = {}
resource_class = allocation.resource_class
allocation_data[key]['resources'][resource_class] = allocation.used
generation = allocation.resource_provider.generation
allocation_data[key]['generation'] = generation
result = {'allocations': allocation_data}
if allocations and want_version.matches((1, 12)):
# We're looking at a list of allocations by consumer id so project and
# user are consistent across the list
consumer = allocations[0].consumer
project_id = consumer.project.external_id
user_id = consumer.user.external_id
result['project_id'] = project_id
result['user_id'] = user_id
show_consumer_gen = want_version.matches((1, 28))
if show_consumer_gen:
result['consumer_generation'] = consumer.generation
return result
def _serialize_allocations_for_resource_provider(allocations,
resource_provider,
want_version):
"""Turn a list of allocations into a dict by consumer id.
{'resource_provider_generation': GENERATION,
'allocations':
CONSUMER_ID_1: {
'resources': {
'DISK_GB': 4,
'VCPU': 2
},
# Generation for consumer >= 1.28
'consumer_generation': 0
},
CONSUMER_ID_2: {
'resources': {
'DISK_GB': 6,
'VCPU': 3
},
# Generation for consumer >= 1.28
'consumer_generation': 0
}
}
"""
show_consumer_gen = want_version.matches((1, 28))
allocation_data = collections.defaultdict(dict)
for allocation in allocations:
key = allocation.consumer.uuid
if 'resources' not in allocation_data[key]:
allocation_data[key]['resources'] = {}
resource_class = allocation.resource_class
allocation_data[key]['resources'][resource_class] = allocation.used
if show_consumer_gen:
consumer_gen = None
if allocation.consumer is not None:
consumer_gen = allocation.consumer.generation
allocation_data[key]['consumer_generation'] = consumer_gen
result = {'allocations': allocation_data}
result['resource_provider_generation'] = resource_provider.generation
return result
# TODO(cdent): Extracting this is useful, for reuse by reshaper code,
# but having it in this file seems wrong, however, since it uses
# _new_allocations it's being left here for now. We need a place for shared
# handler code, but util.py is already too big and too diverse.
def create_allocation_list(context, data, consumers):
"""Create an AllocationList based on provided data.
:param context: The placement context.
:param data: A dictionary of multiple allocations by consumer uuid.
:param consumers: A dictionary, keyed by consumer UUID, of Consumer objects
:return: An AllocationList.
:raises: `webob.exc.HTTPBadRequest` if a resource provider included in the
allocations does not exist.
"""
allocation_objects = []
for consumer_uuid in data:
allocations = data[consumer_uuid]['allocations']
consumer = consumers[consumer_uuid]
if allocations:
rp_objs = _resource_providers_by_uuid(context, allocations.keys())
for resource_provider_uuid in allocations:
resource_provider = rp_objs[resource_provider_uuid]
resources = allocations[resource_provider_uuid]['resources']
new_allocations = _new_allocations(context,
resource_provider,
consumer,
resources)
allocation_objects.extend(new_allocations)
else:
# The allocations are empty, which means wipe them out.
# Internal to the allocation object this is signalled by a
# used value of 0.
allocations = rp_obj.AllocationList.get_all_by_consumer_id(
context, consumer_uuid)
for allocation in allocations:
allocation.used = 0
allocation_objects.append(allocation)
return rp_obj.AllocationList(context, objects=allocation_objects)
def inspect_consumers(context, data, want_version):
"""Look at consumer data in allocations and create consumers as needed.
Keep a record of the consumers that are created in case they need
to be removed later.
If an exception is raised by ensure_consumer, commonly HTTPConflict but
also anything else, the newly created consumers will be deleted and the
exception reraised to the caller.
:param context: The placement context.
:param data: A dictionary of multiple allocations by consumer uuid.
:param want_version: the microversion matcher.
:return: A tuple of a dict of all consumer objects (by consumer uuid)
and a list of those consumer objects which are new.
"""
# First, ensure that all consumers referenced in the payload actually
# exist. And if not, create them. Keep a record of auto-created consumers
# so we can clean them up if the end allocation replace_all() fails.
consumers = {} # dict of Consumer objects, keyed by consumer UUID
new_consumers_created = []
for consumer_uuid in data:
project_id = data[consumer_uuid]['project_id']
user_id = data[consumer_uuid]['user_id']
consumer_generation = data[consumer_uuid].get('consumer_generation')
try:
consumer, new_consumer_created = util.ensure_consumer(
context, consumer_uuid, project_id, user_id,
consumer_generation, want_version)
if new_consumer_created:
new_consumers_created.append(consumer)
consumers[consumer_uuid] = consumer
except Exception:
# If any errors (for instance, a consumer generation conflict)
# occur when ensuring consumer records above, make sure we delete
# any auto-created consumers.
with excutils.save_and_reraise_exception():
delete_consumers(new_consumers_created)
return consumers, new_consumers_created
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def list_for_consumer(req):
"""List allocations associated with a consumer."""
context = req.environ['placement.context']
context.can(policies.ALLOC_LIST)
consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# NOTE(cdent): There is no way for a 404 to be returned here,
# only an empty result. We do not have a way to validate a
# consumer id.
allocations = rp_obj.AllocationList.get_all_by_consumer_id(
context, consumer_id)
output = _serialize_allocations_for_consumer(allocations, want_version)
last_modified = _last_modified_from_allocations(allocations, want_version)
allocations_json = jsonutils.dumps(output)
response = req.response
response.status = 200
response.body = encodeutils.to_utf8(allocations_json)
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def list_for_resource_provider(req):
"""List allocations associated with a resource provider."""
# TODO(cdent): On a shared resource provider (for example a
# giant disk farm) this list could get very long. At the moment
# we have no facility for limiting the output. Given that we are
# using a dict of dicts for the output we are potentially limiting
# ourselves in terms of sorting and filtering.
context = req.environ['placement.context']
context.can(policies.RP_ALLOC_LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
uuid = util.wsgi_path_item(req.environ, 'uuid')
# confirm existence of resource provider so we get a reasonable
# 404 instead of empty list
try:
rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("Resource provider '%(rp_uuid)s' not found: %(error)s") %
{'rp_uuid': uuid, 'error': exc})
allocs = rp_obj.AllocationList.get_all_by_resource_provider(context, rp)
output = _serialize_allocations_for_resource_provider(
allocs, rp, want_version)
last_modified = _last_modified_from_allocations(allocs, want_version)
allocations_json = jsonutils.dumps(output)
response = req.response
response.status = 200
response.body = encodeutils.to_utf8(allocations_json)
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
def _resource_providers_by_uuid(ctx, rp_uuids):
"""Helper method that returns a dict, keyed by resource provider UUID, of
ResourceProvider objects.
:param ctx: The placement context.
:param rp_uuids: iterable of UUIDs for providers to fetch.
:raises: `webob.exc.HTTPBadRequest` if any of the UUIDs do not refer to
an existing resource provider.
"""
res = {}
for rp_uuid in rp_uuids:
# TODO(jaypipes): Clearly, this is not efficient to do one query for
# each resource provider UUID in the allocations instead of doing a
# single query for all the UUIDs. However, since
# ResourceProviderList.get_all_by_filters() is way too complicated for
# this purpose and doesn't raise NotFound anyway, we'll do this.
# Perhaps consider adding a ResourceProviderList.get_all_by_uuids()
# later on?
try:
res[rp_uuid] = rp_obj.ResourceProvider.get_by_uuid(ctx, rp_uuid)
except exception.NotFound:
raise webob.exc.HTTPBadRequest(
_("Allocation for resource provider '%(rp_uuid)s' "
"that does not exist.") %
{'rp_uuid': rp_uuid})
return res
def _new_allocations(context, resource_provider, consumer, resources):
"""Create new allocation objects for a set of resources
Returns a list of Allocation objects
:param context: The placement context.
:param resource_provider: The resource provider that has the resources.
:param consumer: The Consumer object consuming the resources.
:param resources: A dict of resource classes and values.
"""
allocations = []
for resource_class in resources:
allocation = rp_obj.Allocation(
resource_provider=resource_provider,
consumer=consumer,
resource_class=resource_class,
used=resources[resource_class])
allocations.append(allocation)
return allocations
def delete_consumers(consumers):
"""Helper function that deletes any consumer object supplied to it
:param consumers: iterable of Consumer objects to delete
"""
for consumer in consumers:
try:
consumer.delete()
LOG.debug("Deleted auto-created consumer with consumer UUID "
"%s after failed allocation", consumer.uuid)
except Exception as err:
LOG.warning("Got an exception when deleting auto-created "
"consumer with UUID %s: %s", consumer.uuid, err)
def _set_allocations_for_consumer(req, schema):
context = req.environ['placement.context']
context.can(policies.ALLOC_UPDATE)
consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
if not uuidutils.is_uuid_like(consumer_uuid):
raise webob.exc.HTTPBadRequest(
_('Malformed consumer_uuid: %(consumer_uuid)s') %
{'consumer_uuid': consumer_uuid})
consumer_uuid = str(uuid.UUID(consumer_uuid))
data = util.extract_json(req.body, schema)
allocation_data = data['allocations']
# Normalize allocation data to dict.
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
if not want_version.matches((1, 12)):
allocations_dict = {}
# Allocations are list-ish, transform to dict-ish
for allocation in allocation_data:
resource_provider_uuid = allocation['resource_provider']['uuid']
allocations_dict[resource_provider_uuid] = {
'resources': allocation['resources']
}
allocation_data = allocations_dict
allocation_objects = []
# Consumer object saved in case we need to delete the auto-created consumer
# record
consumer = None
# Whether we created a new consumer record
created_new_consumer = False
if not allocation_data:
# The allocations are empty, which means wipe them out. Internal
# to the allocation object this is signalled by a used value of 0.
# We still need to verify the consumer's generation, though, which
# we do in _ensure_consumer()
# NOTE(jaypipes): This will only occur 1.28+. The JSONSchema will
# prevent an empty allocations object from being passed when there is
# no consumer generation, so this is safe to do.
util.ensure_consumer(context, consumer_uuid, data.get('project_id'),
data.get('user_id'), data.get('consumer_generation'),
want_version)
allocations = rp_obj.AllocationList.get_all_by_consumer_id(
context, consumer_uuid)
for allocation in allocations:
allocation.used = 0
allocation_objects.append(allocation)
else:
# If the body includes an allocation for a resource provider
# that does not exist, raise a 400.
rp_objs = _resource_providers_by_uuid(context, allocation_data.keys())
consumer, created_new_consumer = util.ensure_consumer(
context, consumer_uuid, data.get('project_id'),
data.get('user_id'), data.get('consumer_generation'),
want_version)
for resource_provider_uuid, allocation in allocation_data.items():
resource_provider = rp_objs[resource_provider_uuid]
new_allocations = _new_allocations(context,
resource_provider,
consumer,
allocation['resources'])
allocation_objects.extend(new_allocations)
allocations = rp_obj.AllocationList(
context, objects=allocation_objects)
def _create_allocations(alloc_list):
try:
alloc_list.replace_all()
LOG.debug("Successfully wrote allocations %s", alloc_list)
except Exception:
if created_new_consumer:
delete_consumers([consumer])
raise
try:
_create_allocations(allocations)
# InvalidInventory is a parent for several exceptions that
# indicate either that Inventory is not present, or that
# capacity limits have been exceeded.
except exception.NotFound as exc:
raise webob.exc.HTTPBadRequest(
_("Unable to allocate inventory for consumer "
"%(consumer_uuid)s: %(error)s") %
{'consumer_uuid': consumer_uuid, 'error': exc})
except exception.InvalidInventory as exc:
raise webob.exc.HTTPConflict(
_('Unable to allocate inventory: %(error)s') % {'error': exc})
except exception.ConcurrentUpdateDetected as exc:
raise webob.exc.HTTPConflict(
_('Inventory and/or allocations changed while attempting to '
'allocate: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
req.response.status = 204
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.0', '1.7')
@util.require_content('application/json')
def set_allocations_for_consumer(req):
return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA)
@wsgi_wrapper.PlacementWsgify # noqa
@microversion.version_handler('1.8', '1.11')
@util.require_content('application/json')
def set_allocations_for_consumer(req):
return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_8)
@wsgi_wrapper.PlacementWsgify # noqa
@microversion.version_handler('1.12', '1.27')
@util.require_content('application/json')
def set_allocations_for_consumer(req):
return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_12)
@wsgi_wrapper.PlacementWsgify # noqa
@microversion.version_handler('1.28')
@util.require_content('application/json')
def set_allocations_for_consumer(req):
return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_28)
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.13')
@util.require_content('application/json')
def set_allocations(req):
context = req.environ['placement.context']
context.can(policies.ALLOC_MANAGE)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
want_schema = schema.POST_ALLOCATIONS_V1_13
if want_version.matches((1, 28)):
want_schema = schema.POST_ALLOCATIONS_V1_28
data = util.extract_json(req.body, want_schema)
consumers, new_consumers_created = inspect_consumers(
context, data, want_version)
# Create a sequence of allocation objects to be used in one
# AllocationList.replace_all() call, which will mean all the changes
# happen within a single transaction and with resource provider
# and consumer generations (if applicable) check all in one go.
allocations = create_allocation_list(context, data, consumers)
def _create_allocations(alloc_list):
try:
alloc_list.replace_all()
LOG.debug("Successfully wrote allocations %s", alloc_list)
except Exception:
delete_consumers(new_consumers_created)
raise
try:
_create_allocations(allocations)
except exception.NotFound as exc:
raise webob.exc.HTTPBadRequest(
_("Unable to allocate inventory %(error)s") % {'error': exc})
except exception.InvalidInventory as exc:
# InvalidInventory is a parent for several exceptions that
# indicate either that Inventory is not present, or that
# capacity limits have been exceeded.
raise webob.exc.HTTPConflict(
_('Unable to allocate inventory: %(error)s') % {'error': exc})
except exception.ConcurrentUpdateDetected as exc:
raise webob.exc.HTTPConflict(
_('Inventory and/or allocations changed while attempting to '
'allocate: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
req.response.status = 204
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
def delete_allocations(req):
context = req.environ['placement.context']
context.can(policies.ALLOC_DELETE)
consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
allocations = rp_obj.AllocationList.get_all_by_consumer_id(
context, consumer_uuid)
if allocations:
try:
allocations.delete_all()
# NOTE(pumaranikar): The following NotFound handling covers the case
# where the allocations are deleted by some other activity between the
# read above and this call; delete_all() then raises NotFound.
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("Allocation for consumer with id %(id)s not found."
"error: %(error)s") %
{'id': consumer_uuid, 'error': exc})
else:
raise webob.exc.HTTPNotFound(
_("No allocations for consumer '%(consumer_uuid)s'") %
{'consumer_uuid': consumer_uuid})
LOG.debug("Successfully deleted allocations %s", allocations)
req.response.status = 204
req.response.content_type = None
return req.response

View File

@ -1,332 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for getting allocation candidates."""
import collections
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import six
import webob
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import allocation_candidate as \
policies
from nova.api.openstack.placement.schemas import allocation_candidate as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _transform_allocation_requests_dict(alloc_reqs):
"""Turn supplied list of AllocationRequest objects into a list of
allocations dicts keyed by the uuid of each resource provider involved
in the allocation request. The returned results are intended to be used
as the body of a PUT /allocations/{consumer_uuid} HTTP request at
microversion 1.12 (and beyond). The JSON objects look like the following:
[
{
"allocations": {
$rp_uuid1: {
"resources": {
"MEMORY_MB": 512
...
}
},
$rp_uuid2: {
"resources": {
"DISK_GB": 1024
...
}
}
},
},
...
]
"""
results = []
for ar in alloc_reqs:
# A defaultdict of {$rp_uuid: {"resources": {}}}
rp_resources = collections.defaultdict(lambda: dict(resources={}))
for rr in ar.resource_requests:
res_dict = rp_resources[rr.resource_provider.uuid]['resources']
res_dict[rr.resource_class] = rr.amount
results.append(dict(allocations=rp_resources))
return results
def _transform_allocation_requests_list(alloc_reqs):
"""Turn supplied list of AllocationRequest objects into a list of dicts of
resources involved in the allocation request. The returned result is
intended to be usable as the body of a PUT
/allocations/{consumer_uuid} HTTP request prior to microversion 1.12,
so we return a list of JSON objects that look like the
following:
[
{
"allocations": [
{
"resource_provider": {
"uuid": $rp_uuid,
}
"resources": {
$resource_class: $requested_amount, ...
},
}, ...
],
}, ...
]
"""
results = []
for ar in alloc_reqs:
provider_resources = collections.defaultdict(dict)
for rr in ar.resource_requests:
res_dict = provider_resources[rr.resource_provider.uuid]
res_dict[rr.resource_class] = rr.amount
allocs = [
{
"resource_provider": {
"uuid": rp_uuid,
},
"resources": resources,
} for rp_uuid, resources in provider_resources.items()
]
alloc = {
"allocations": allocs
}
results.append(alloc)
return results
def _transform_provider_summaries(p_sums, requests, want_version):
"""Turn supplied list of ProviderSummary objects into a dict, keyed by
resource provider UUID, of dicts of provider and inventory information.
The traits only show up when `want_version` is 1.17 or newer. All the
resource classes are shown when `want_version` is 1.27 or newer while
only requested resources are included in the `provider_summaries`
for older versions. The parent and root provider uuids only show up
when `want_version` is 1.29 or newer.
{
RP_UUID_1: {
'resources': {
'DISK_GB': {
'capacity': 100,
'used': 0,
},
'VCPU': {
'capacity': 4,
'used': 0,
}
},
# traits shows up from microversion 1.17
'traits': [
'HW_CPU_X86_AVX512F',
'HW_CPU_X86_AVX512CD'
]
# parent/root provider uuids show up from microversion 1.29
parent_provider_uuid: null,
root_provider_uuid: RP_UUID_1
},
RP_UUID_2: {
'resources': {
'DISK_GB': {
'capacity': 100,
'used': 0,
},
'VCPU': {
'capacity': 4,
'used': 0,
}
},
# traits shows up from microversion 1.17
'traits': [
'HW_NIC_OFFLOAD_TSO',
'HW_NIC_OFFLOAD_GRO'
],
# parent/root provider uuids show up from microversion 1.29
parent_provider_uuid: null,
root_provider_uuid: RP_UUID_2
}
}
"""
include_traits = want_version.matches((1, 17))
include_all_resources = want_version.matches((1, 27))
enable_nested_providers = want_version.matches((1, 29))
ret = {}
requested_resources = set()
for requested_group in requests.values():
requested_resources |= set(requested_group.resources)
# if include_all_resources is false, only requested resources are
# included in the provider_summaries.
for ps in p_sums:
resources = {
psr.resource_class: {
'capacity': psr.capacity,
'used': psr.used,
} for psr in ps.resources if (
include_all_resources or
psr.resource_class in requested_resources)
}
ret[ps.resource_provider.uuid] = {'resources': resources}
if include_traits:
ret[ps.resource_provider.uuid]['traits'] = [
t.name for t in ps.traits]
if enable_nested_providers:
ret[ps.resource_provider.uuid]['parent_provider_uuid'] = (
ps.resource_provider.parent_provider_uuid)
ret[ps.resource_provider.uuid]['root_provider_uuid'] = (
ps.resource_provider.root_provider_uuid)
return ret
def _exclude_nested_providers(alloc_cands):
"""Exclude allocation requests and provider summaries for old microversions
if they involve more than one provider from the same tree.
"""
# Build a temporary dict, keyed by root RP UUID of sets of UUIDs of all RPs
# in that tree.
tree_rps_by_root = collections.defaultdict(set)
for ps in alloc_cands.provider_summaries:
rp_uuid = ps.resource_provider.uuid
root_uuid = ps.resource_provider.root_provider_uuid
tree_rps_by_root[root_uuid].add(rp_uuid)
# We use this to get a list of sets of providers in each tree
tree_sets = list(tree_rps_by_root.values())
for a_req in alloc_cands.allocation_requests[:]:
alloc_rp_uuids = set([
arr.resource_provider.uuid for arr in a_req.resource_requests])
# If more than one allocation is provided by the same tree, kill
# that allocation request.
if any(len(tree_set & alloc_rp_uuids) > 1 for tree_set in tree_sets):
alloc_cands.allocation_requests.remove(a_req)
# Exclude eliminated providers from the provider summaries.
all_rp_uuids = set()
for a_req in alloc_cands.allocation_requests:
all_rp_uuids |= set(
arr.resource_provider.uuid for arr in a_req.resource_requests)
for ps in alloc_cands.provider_summaries[:]:
if ps.resource_provider.uuid not in all_rp_uuids:
alloc_cands.provider_summaries.remove(ps)
return alloc_cands
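# NOTE: Illustrative sketch, not part of the original module. It repeats the
# set arithmetic used by _exclude_nested_providers above on plain data so the
# filtering rule is easier to see: any allocation request touching more than
# one provider from the same tree is dropped for pre-1.29 microversions. The
# provider names are hypothetical.
def _exclude_nested_example():
    # Two trees: root-a with child-a1, and root-b on its own.
    tree_rps_by_root = {
        'root-a': {'root-a', 'child-a1'},
        'root-b': {'root-b'},
    }
    allocation_requests = [
        {'root-a', 'child-a1'},   # spans two providers in the same tree
        {'root-a', 'root-b'},     # spans two different trees
    ]
    tree_sets = list(tree_rps_by_root.values())
    kept = [
        rp_uuids for rp_uuids in allocation_requests
        if not any(len(tree_set & rp_uuids) > 1 for tree_set in tree_sets)
    ]
    # Only the request spanning distinct trees survives.
    return kept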
def _transform_allocation_candidates(alloc_cands, requests, want_version):
"""Turn supplied AllocationCandidates object into a dict containing
allocation requests and provider summaries.
{
'allocation_requests': <ALLOC_REQUESTS>,
'provider_summaries': <PROVIDER_SUMMARIES>,
}
"""
# exclude nested providers with old microversions
if not want_version.matches((1, 29)):
alloc_cands = _exclude_nested_providers(alloc_cands)
if want_version.matches((1, 12)):
a_reqs = _transform_allocation_requests_dict(
alloc_cands.allocation_requests)
else:
a_reqs = _transform_allocation_requests_list(
alloc_cands.allocation_requests)
p_sums = _transform_provider_summaries(
alloc_cands.provider_summaries, requests, want_version)
return {
'allocation_requests': a_reqs,
'provider_summaries': p_sums,
}
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.10')
@util.check_accept('application/json')
def list_allocation_candidates(req):
"""GET a JSON object with a list of allocation requests and a JSON object
of provider summary objects
On success return a 200 and an application/json body representing
a collection of allocation requests and provider summaries
"""
context = req.environ['placement.context']
context.can(policies.LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
get_schema = schema.GET_SCHEMA_1_10
if want_version.matches((1, 25)):
get_schema = schema.GET_SCHEMA_1_25
elif want_version.matches((1, 21)):
get_schema = schema.GET_SCHEMA_1_21
elif want_version.matches((1, 17)):
get_schema = schema.GET_SCHEMA_1_17
elif want_version.matches((1, 16)):
get_schema = schema.GET_SCHEMA_1_16
util.validate_query_params(req, get_schema)
requests = util.parse_qs_request_groups(req)
limit = req.GET.getall('limit')
# JSONschema has already confirmed that limit has the form
# of an integer.
if limit:
limit = int(limit[0])
group_policy = req.GET.getall('group_policy') or None
# Schema ensures we get either "none" or "isolate"
if group_policy:
group_policy = group_policy[0]
else:
# group_policy is required if more than one numbered request group was
# specified.
if len([rg for rg in requests.values() if rg.use_same_provider]) > 1:
raise webob.exc.HTTPBadRequest(
_('The "group_policy" parameter is required when specifying '
'more than one "resources{N}" parameter.'))
try:
cands = rp_obj.AllocationCandidates.get_by_requests(
context, requests, limit=limit, group_policy=group_policy)
except exception.ResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid resource class in resources parameter: %(error)s') %
{'error': exc})
except exception.TraitNotFound as exc:
raise webob.exc.HTTPBadRequest(six.text_type(exc))
response = req.response
trx_cands = _transform_allocation_candidates(cands, requests, want_version)
json_data = jsonutils.dumps(trx_cands)
response.body = encodeutils.to_utf8(json_data)
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.cache_control = 'no-cache'
response.last_modified = timeutils.utcnow(with_timezone=True)
return response

View File

@ -1,467 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inventory handlers for Placement API."""
import copy
import operator
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import inventory as policies
from nova.api.openstack.placement.schemas import inventory as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.db import constants as db_const
from nova.i18n import _
# NOTE(cdent): We keep our own representation of inventory defaults
# and output fields, separate from the versioned object to avoid
# inadvertent API changes when the object defaults are changed.
OUTPUT_INVENTORY_FIELDS = [
'total',
'reserved',
'min_unit',
'max_unit',
'step_size',
'allocation_ratio',
]
INVENTORY_DEFAULTS = {
'reserved': 0,
'min_unit': 1,
'max_unit': db_const.MAX_INT,
'step_size': 1,
'allocation_ratio': 1.0
}
def _extract_inventory(body, schema):
"""Extract and validate inventory from JSON body."""
data = util.extract_json(body, schema)
inventory_data = copy.copy(INVENTORY_DEFAULTS)
inventory_data.update(data)
return inventory_data
def _extract_inventories(body, schema):
"""Extract and validate multiple inventories from JSON body."""
data = util.extract_json(body, schema)
inventories = {}
for res_class, raw_inventory in data['inventories'].items():
inventory_data = copy.copy(INVENTORY_DEFAULTS)
inventory_data.update(raw_inventory)
inventories[res_class] = inventory_data
data['inventories'] = inventories
return data
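# NOTE: Illustrative sketch, not part of the original module. It shows the
# effect of the INVENTORY_DEFAULTS merge performed by _extract_inventory and
# _extract_inventories above; the resource total is hypothetical. 'copy' and
# 'INVENTORY_DEFAULTS' are the module-level names defined above.
def _defaults_merge_example():
    raw_inventory = {'total': 8}  # caller supplied only 'total'
    inventory_data = copy.copy(INVENTORY_DEFAULTS)
    inventory_data.update(raw_inventory)
    # inventory_data now carries reserved=0, min_unit=1,
    # max_unit=db_const.MAX_INT, step_size=1 and allocation_ratio=1.0
    # alongside the supplied total=8.
    return inventory_data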
def make_inventory_object(resource_provider, resource_class, **data):
"""Single place to catch malformed Inventories."""
# TODO(cdent): Some of the validation checks that are done here
# could be done via JSONschema (using, for example, "minimum":
# 0) for non-negative integers. It's not clear if that is
# duplication or decoupling, so it is left as is for now.
try:
inventory = rp_obj.Inventory(
resource_provider=resource_provider,
resource_class=resource_class, **data)
except (ValueError, TypeError) as exc:
raise webob.exc.HTTPBadRequest(
_('Bad inventory %(class)s for resource provider '
'%(rp_uuid)s: %(error)s') % {'class': resource_class,
'rp_uuid': resource_provider.uuid,
'error': exc})
return inventory
def _send_inventories(req, resource_provider, inventories):
"""Send a JSON representation of a list of inventories."""
response = req.response
response.status = 200
output, last_modified = _serialize_inventories(
inventories, resource_provider.generation)
response.body = encodeutils.to_utf8(jsonutils.dumps(output))
response.content_type = 'application/json'
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
def _send_inventory(req, resource_provider, inventory, status=200):
"""Send a JSON representation of one single inventory."""
response = req.response
response.status = status
response.body = encodeutils.to_utf8(jsonutils.dumps(_serialize_inventory(
inventory, generation=resource_provider.generation)))
response.content_type = 'application/json'
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
if want_version.matches((1, 15)):
modified = util.pick_last_modified(None, inventory)
response.last_modified = modified
response.cache_control = 'no-cache'
return response
def _serialize_inventory(inventory, generation=None):
"""Turn a single inventory into a dictionary."""
data = {
field: getattr(inventory, field)
for field in OUTPUT_INVENTORY_FIELDS
}
if generation:
data['resource_provider_generation'] = generation
return data
def _serialize_inventories(inventories, generation):
"""Turn a list of inventories in a dict by resource class."""
inventories_by_class = {inventory.resource_class: inventory
for inventory in inventories}
inventories_dict = {}
last_modified = None
for resource_class, inventory in inventories_by_class.items():
last_modified = util.pick_last_modified(last_modified, inventory)
inventories_dict[resource_class] = _serialize_inventory(
inventory, generation=None)
return ({'resource_provider_generation': generation,
'inventories': inventories_dict}, last_modified)
def _validate_inventory_capacity(version, inventories):
"""Validate inventory capacity.
:param version: request microversion.
:param inventories: Inventory or InventoryList to validate capacities of.
:raises: exception.InvalidInventoryCapacityReservedCanBeTotal if request
microversion is 1.26 or higher and any inventory has capacity < 0.
:raises: exception.InvalidInventoryCapacity if request
microversion is lower than 1.26 and any inventory has capacity <= 0.
"""
if not version.matches((1, 26)):
op = operator.le
exc_class = exception.InvalidInventoryCapacity
else:
op = operator.lt
exc_class = exception.InvalidInventoryCapacityReservedCanBeTotal
if isinstance(inventories, rp_obj.Inventory):
inventories = rp_obj.InventoryList(objects=[inventories])
for inventory in inventories:
if op(inventory.capacity, 0):
raise exc_class(
resource_class=inventory.resource_class,
resource_provider=inventory.resource_provider.uuid)
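# NOTE: Illustrative sketch, not part of the original module. It restates the
# operator switch in _validate_inventory_capacity above with plain numbers.
# Deriving capacity as (total - reserved) * allocation_ratio is an assumption
# made here for illustration; only the 1.26 operator change is taken from the
# handler code above.
def _capacity_check_example(total, reserved, allocation_ratio, at_least_1_26):
    capacity = (total - reserved) * allocation_ratio  # assumed derivation
    if at_least_1_26:
        # 1.26+: reserved may equal total, so only negative capacity fails.
        return capacity >= 0
    # Before 1.26 capacity must be strictly positive.
    return capacity > 0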
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def create_inventory(req):
"""POST to create one inventory.
On success return a 201 response, a location header pointing
to the newly created inventory and an application/json representation
of the inventory.
"""
context = req.environ['placement.context']
context.can(policies.CREATE)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
data = _extract_inventory(req.body, schema.POST_INVENTORY_SCHEMA)
resource_class = data.pop('resource_class')
inventory = make_inventory_object(resource_provider,
resource_class,
**data)
try:
_validate_inventory_capacity(
req.environ[microversion.MICROVERSION_ENVIRON], inventory)
resource_provider.add_inventory(inventory)
except (exception.ConcurrentUpdateDetected,
db_exc.DBDuplicateEntry) as exc:
raise webob.exc.HTTPConflict(
_('Update conflict: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
except (exception.InvalidInventoryCapacity,
exception.NotFound) as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to create inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc})
response = req.response
response.location = util.inventory_url(
req.environ, resource_provider, resource_class)
return _send_inventory(req, resource_provider, inventory,
status=201)
@wsgi_wrapper.PlacementWsgify
def delete_inventory(req):
"""DELETE to destroy a single inventory.
If the inventory is in use or resource provider generation is out
of sync return a 409.
On success return a 204 and an empty body.
"""
context = req.environ['placement.context']
context.can(policies.DELETE)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_class = util.wsgi_path_item(req.environ, 'resource_class')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
try:
resource_provider.delete_inventory(resource_class)
except (exception.ConcurrentUpdateDetected,
exception.InventoryInUse) as exc:
raise webob.exc.HTTPConflict(
_('Unable to delete inventory of class %(class)s: %(error)s') %
{'class': resource_class, 'error': exc},
comment=errors.CONCURRENT_UPDATE)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_('No inventory of class %(class)s found for delete: %(error)s') %
{'class': resource_class, 'error': exc})
response = req.response
response.status = 204
response.content_type = None
return response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def get_inventories(req):
"""GET a list of inventories.
On success return a 200 with an application/json body representing
a collection of inventories.
"""
context = req.environ['placement.context']
context.can(policies.LIST)
uuid = util.wsgi_path_item(req.environ, 'uuid')
try:
rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %(uuid)s found : %(error)s") %
{'uuid': uuid, 'error': exc})
inv_list = rp_obj.InventoryList.get_all_by_resource_provider(context, rp)
return _send_inventories(req, rp, inv_list)
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def get_inventory(req):
"""GET one inventory.
On success return a 200 and an application/json body representing one
inventory.
"""
context = req.environ['placement.context']
context.can(policies.SHOW)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_class = util.wsgi_path_item(req.environ, 'resource_class')
try:
rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %(uuid)s found : %(error)s") %
{'uuid': uuid, 'error': exc})
inv_list = rp_obj.InventoryList.get_all_by_resource_provider(context, rp)
inventory = inv_list.find(resource_class)
if not inventory:
raise webob.exc.HTTPNotFound(
_('No inventory of class %(class)s for %(rp_uuid)s') %
{'class': resource_class, 'rp_uuid': uuid})
return _send_inventory(req, rp, inventory)
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def set_inventories(req):
"""PUT to set all inventory for a resource provider.
Create, update and delete inventory as required to reset all
the inventory.
If the resource generation is out of sync, return a 409.
If an inventory to be deleted is in use, return a 409.
If any inventory to be created or updated has settings which are
invalid (for example reserved exceeds capacity), return a 400.
On success return a 200 with an application/json body representing
the inventories.
"""
context = req.environ['placement.context']
context.can(policies.UPDATE)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
data = _extract_inventories(req.body, schema.PUT_INVENTORY_SCHEMA)
if data['resource_provider_generation'] != resource_provider.generation:
raise webob.exc.HTTPConflict(
_('resource provider generation conflict'),
comment=errors.CONCURRENT_UPDATE)
inv_list = []
for res_class, inventory_data in data['inventories'].items():
inventory = make_inventory_object(
resource_provider, res_class, **inventory_data)
inv_list.append(inventory)
inventories = rp_obj.InventoryList(objects=inv_list)
try:
_validate_inventory_capacity(
req.environ[microversion.MICROVERSION_ENVIRON], inventories)
resource_provider.set_inventory(inventories)
except exception.ResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Unknown resource class in inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc})
except exception.InventoryWithResourceClassNotFound as exc:
raise webob.exc.HTTPConflict(
_('Race condition detected when setting inventory. No inventory '
'record with resource class for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc})
except (exception.ConcurrentUpdateDetected,
db_exc.DBDuplicateEntry) as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
except exception.InventoryInUse as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
comment=errors.INVENTORY_INUSE)
except exception.InvalidInventoryCapacity as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to update inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc})
return _send_inventories(req, resource_provider, inventories)
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.5', status_code=405)
def delete_inventories(req):
"""DELETE all inventory for a resource provider.
Delete inventory as required to reset all the inventory.
If an inventory to be deleted is in use, return a 409 Conflict.
On success return a 204 No content.
Return 405 Method Not Allowed if the wanted microversion does not match.
"""
context = req.environ['placement.context']
context.can(policies.DELETE)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
inventories = rp_obj.InventoryList(objects=[])
try:
resource_provider.set_inventory(inventories)
except exception.ConcurrentUpdateDetected:
raise webob.exc.HTTPConflict(
_('Unable to delete inventory for resource provider '
'%(rp_uuid)s because the inventory was updated by '
'another process. Please retry your request.')
% {'rp_uuid': resource_provider.uuid},
comment=errors.CONCURRENT_UPDATE)
except exception.InventoryInUse as ex:
# NOTE(mriedem): This message cannot change without impacting the
# nova.scheduler.client.report._RE_INV_IN_USE regex.
raise webob.exc.HTTPConflict(ex.format_message(),
comment=errors.INVENTORY_INUSE)
response = req.response
response.status = 204
response.content_type = None
return response
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def update_inventory(req):
"""PUT to update one inventory.
If the resource generation is out of sync, return a 409.
If the inventory has settings which are invalid (for example
reserved exceeds capacity), return a 400.
On success return a 200 with an application/json body representing
the inventory.
"""
context = req.environ['placement.context']
context.can(policies.UPDATE)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_class = util.wsgi_path_item(req.environ, 'resource_class')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
data = _extract_inventory(req.body, schema.BASE_INVENTORY_SCHEMA)
if data['resource_provider_generation'] != resource_provider.generation:
raise webob.exc.HTTPConflict(
_('resource provider generation conflict'),
comment=errors.CONCURRENT_UPDATE)
inventory = make_inventory_object(resource_provider,
resource_class,
**data)
try:
_validate_inventory_capacity(
req.environ[microversion.MICROVERSION_ENVIRON], inventory)
resource_provider.update_inventory(inventory)
except (exception.ConcurrentUpdateDetected,
db_exc.DBDuplicateEntry) as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
except exception.InventoryWithResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('No inventory record with resource class for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc})
except exception.InvalidInventoryCapacity as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to update inventory for resource provider '
'%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
'error': exc})
return _send_inventory(req, resource_provider, inventory)
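# NOTE: Illustrative sketch, not part of the original module. A PUT body for
# update_inventory above carries the current provider generation alongside
# the inventory fields; anything omitted falls back to INVENTORY_DEFAULTS via
# _extract_inventory. The numbers are hypothetical.
_EXAMPLE_PUT_INVENTORY_BODY = {
    'resource_provider_generation': 2,
    'total': 8,
    'reserved': 1,
    'allocation_ratio': 16.0,
}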

View File

@ -1,129 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handler for the reshaper.
The reshaper provides for atomically migrating resource provider inventories
and associated allocations when some of the inventory moves from one resource
provider to another, such as when a class of inventory moves from a parent
provider to a new child provider.
"""
import copy
from oslo_utils import excutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
# TODO(cdent): That we are doing this suggests that there's stuff to be
# extracted from the handler to a shared module.
from nova.api.openstack.placement.handlers import allocation
from nova.api.openstack.placement.handlers import inventory
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import reshaper as policies
from nova.api.openstack.placement.schemas import reshaper as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
# TODO(cdent): placement needs its own version of this
from nova.i18n import _
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.30')
@util.require_content('application/json')
def reshape(req):
context = req.environ['placement.context']
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
context.can(policies.RESHAPE)
data = util.extract_json(req.body, schema.POST_RESHAPER_SCHEMA)
inventories = data['inventories']
allocations = data['allocations']
# We're going to create several InventoryList, by rp uuid.
inventory_by_rp = {}
# TODO(cdent): this has overlaps with inventory:set_inventories
# and is a mess of bad names and lack of method extraction.
for rp_uuid, inventory_data in inventories.items():
try:
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, rp_uuid)
except exception.NotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Resource provider %(rp_uuid)s in inventories not found: '
'%(error)s') % {'rp_uuid': rp_uuid, 'error': exc},
comment=errors.RESOURCE_PROVIDER_NOT_FOUND)
# Do an early generation check.
generation = inventory_data['resource_provider_generation']
if generation != resource_provider.generation:
raise webob.exc.HTTPConflict(
_('resource provider generation conflict for provider %(rp)s: '
'actual: %(actual)s, given: %(given)s') %
{'rp': rp_uuid,
'actual': resource_provider.generation,
'given': generation},
comment=errors.CONCURRENT_UPDATE)
inv_list = []
for res_class, raw_inventory in inventory_data['inventories'].items():
inv_data = copy.copy(inventory.INVENTORY_DEFAULTS)
inv_data.update(raw_inventory)
inv_obj = inventory.make_inventory_object(
resource_provider, res_class, **inv_data)
inv_list.append(inv_obj)
inventory_by_rp[resource_provider] = rp_obj.InventoryList(
objects=inv_list)
# Make the consumer objects associated with the allocations.
consumers, new_consumers_created = allocation.inspect_consumers(
context, allocations, want_version)
# Nest exception handling so that any exception results in new consumer
# objects being deleted, then reraise for translating to HTTP exceptions.
try:
try:
# When these allocations are created they get resource provider
# objects which are different instances (usually with the same
# data) from those loaded above when creating inventory objects.
# The reshape method below is responsible for ensuring that the
# resource providers and their generations do not conflict.
allocation_objects = allocation.create_allocation_list(
context, allocations, consumers)
rp_obj.reshape(context, inventory_by_rp, allocation_objects)
except Exception:
with excutils.save_and_reraise_exception():
allocation.delete_consumers(new_consumers_created)
# Generation conflict is a (rare) possibility in a few different
# places in reshape().
except exception.ConcurrentUpdateDetected as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
comment=errors.CONCURRENT_UPDATE)
# A NotFound here means a resource class that does not exist was named
except exception.NotFound as exc:
raise webob.exc.HTTPBadRequest(
_('malformed reshaper data: %(error)s') % {'error': exc})
# Distinguish inventory in use (has allocations on it)...
except exception.InventoryInUse as exc:
raise webob.exc.HTTPConflict(
_('update conflict: %(error)s') % {'error': exc},
comment=errors.INVENTORY_INUSE)
# ...from allocations which won't fit for a variety of reasons.
except exception.InvalidInventory as exc:
raise webob.exc.HTTPConflict(
_('Unable to allocate inventory: %(error)s') % {'error': exc})
req.response.status = 204
req.response.content_type = None
return req.response
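# NOTE: Illustrative sketch, not part of the original module. The reshape
# handler above reads two top-level keys from the request body. The
# 'inventories' shape follows directly from the parsing code above; the
# 'allocations' shape mirrors what allocation.inspect_consumers and
# allocation.create_allocation_list consume, and the exact field names shown
# for it are an assumption here. All uuids, ids and values are placeholders.
_EXAMPLE_RESHAPER_BODY = {
    'inventories': {
        '$RP_UUID': {
            'resource_provider_generation': 7,
            'inventories': {
                'VCPU': {'total': 8},          # unset fields take defaults
                'MEMORY_MB': {'total': 16384},
            },
        },
    },
    'allocations': {
        '$CONSUMER_UUID': {
            'allocations': {'$RP_UUID': {'resources': {'VCPU': 2}}},
            'project_id': '$PROJECT_ID',
            'user_id': '$USER_ID',
            'consumer_generation': 1,
        },
    },
}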

View File

@ -1,241 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for resource classes."""
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import webob
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import resource_class as policies
from nova.api.openstack.placement.schemas import resource_class as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _serialize_links(environ, rc):
url = util.resource_class_url(environ, rc)
links = [{'rel': 'self', 'href': url}]
return links
def _serialize_resource_class(environ, rc):
data = {
'name': rc.name,
'links': _serialize_links(environ, rc)
}
return data
def _serialize_resource_classes(environ, rcs, want_version):
output = []
last_modified = None
get_last_modified = want_version.matches((1, 15))
for rc in rcs:
if get_last_modified:
last_modified = util.pick_last_modified(last_modified, rc)
data = _serialize_resource_class(environ, rc)
output.append(data)
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return ({"resource_classes": output}, last_modified)
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
@util.require_content('application/json')
def create_resource_class(req):
"""POST to create a resource class.
On success return a 201 response with an empty body and a location
header pointing to the newly created resource class.
"""
context = req.environ['placement.context']
context.can(policies.CREATE)
data = util.extract_json(req.body, schema.POST_RC_SCHEMA_V1_2)
try:
rc = rp_obj.ResourceClass(context, name=data['name'])
rc.create()
except exception.ResourceClassExists:
raise webob.exc.HTTPConflict(
_('Conflicting resource class already exists: %(name)s') %
{'name': data['name']})
except exception.MaxDBRetriesExceeded:
raise webob.exc.HTTPConflict(
_('Max retries of DB transaction exceeded attempting '
'to create resource class: %(name)s, please '
'try again.') %
{'name': data['name']})
req.response.location = util.resource_class_url(req.environ, rc)
req.response.status = 201
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
def delete_resource_class(req):
"""DELETE to destroy a single resource class.
On success return a 204 and an empty body.
"""
name = util.wsgi_path_item(req.environ, 'name')
context = req.environ['placement.context']
context.can(policies.DELETE)
# The containing application will catch a not found here.
rc = rp_obj.ResourceClass.get_by_name(context, name)
try:
rc.destroy()
except exception.ResourceClassCannotDeleteStandard as exc:
raise webob.exc.HTTPBadRequest(
_('Error in delete resource class: %(error)s') % {'error': exc})
except exception.ResourceClassInUse as exc:
raise webob.exc.HTTPConflict(
_('Error in delete resource class: %(error)s') % {'error': exc})
req.response.status = 204
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
@util.check_accept('application/json')
def get_resource_class(req):
"""Get a single resource class.
On success return a 200 with an application/json body representing
the resource class.
"""
name = util.wsgi_path_item(req.environ, 'name')
context = req.environ['placement.context']
context.can(policies.SHOW)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# The containing application will catch a not found here.
rc = rp_obj.ResourceClass.get_by_name(context, name)
req.response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_resource_class(req.environ, rc))
)
req.response.content_type = 'application/json'
if want_version.matches((1, 15)):
req.response.cache_control = 'no-cache'
# Non-custom resource classes will return None from pick_last_modified,
# so the 'or' causes utcnow to be used.
last_modified = util.pick_last_modified(None, rc) or timeutils.utcnow(
with_timezone=True)
req.response.last_modified = last_modified
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2')
@util.check_accept('application/json')
def list_resource_classes(req):
"""GET a list of resource classes.
On success return a 200 and an application/json body representing
a collection of resource classes.
"""
context = req.environ['placement.context']
context.can(policies.LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
rcs = rp_obj.ResourceClassList.get_all(context)
response = req.response
output, last_modified = _serialize_resource_classes(
req.environ, rcs, want_version)
response.body = encodeutils.to_utf8(jsonutils.dumps(output))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.2', '1.6')
@util.require_content('application/json')
def update_resource_class(req):
"""PUT to update a single resource class.
On success return a 200 response with a representation of the updated
resource class.
"""
name = util.wsgi_path_item(req.environ, 'name')
context = req.environ['placement.context']
context.can(policies.UPDATE)
data = util.extract_json(req.body, schema.PUT_RC_SCHEMA_V1_2)
# The containing application will catch a not found here.
rc = rp_obj.ResourceClass.get_by_name(context, name)
rc.name = data['name']
try:
rc.save()
except exception.ResourceClassExists:
raise webob.exc.HTTPConflict(
_('Resource class already exists: %(name)s') %
{'name': rc.name})
except exception.ResourceClassCannotUpdateStandard:
raise webob.exc.HTTPBadRequest(
_('Cannot update standard resource class %(rp_name)s') %
{'rp_name': name})
req.response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_resource_class(req.environ, rc))
)
req.response.status = 200
req.response.content_type = 'application/json'
return req.response
@wsgi_wrapper.PlacementWsgify # noqa
@microversion.version_handler('1.7')
def update_resource_class(req):
"""PUT to create or validate the existence of single resource class.
On a successful create return 201. Return 204 if the class already
exists. If the resource class is not a custom resource class, return
a 400. 409 might be a better choice, but 400 aligns with previous code.
"""
name = util.wsgi_path_item(req.environ, 'name')
context = req.environ['placement.context']
context.can(policies.UPDATE)
# Use JSON validation to validate the resource class name.
util.extract_json('{"name": "%s"}' % name, schema.PUT_RC_SCHEMA_V1_2)
status = 204
try:
rc = rp_obj.ResourceClass.get_by_name(context, name)
except exception.NotFound:
try:
rc = rp_obj.ResourceClass(context, name=name)
rc.create()
status = 201
# We will not see ResourceClassCannotUpdateStandard because
# that was already caught when validating the {name}.
except exception.ResourceClassExists:
# Someone just now created the class, so stick with 204
pass
req.response.status = status
req.response.content_type = None
req.response.location = util.resource_class_url(req.environ, rc)
return req.response

View File

@ -1,308 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for resource providers."""
import uuid as uuidlib
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import resource_provider as policies
from nova.api.openstack.placement.schemas import resource_provider as rp_schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _serialize_links(environ, resource_provider):
url = util.resource_provider_url(environ, resource_provider)
links = [{'rel': 'self', 'href': url}]
rel_types = ['inventories', 'usages']
want_version = environ[microversion.MICROVERSION_ENVIRON]
if want_version >= (1, 1):
rel_types.append('aggregates')
if want_version >= (1, 6):
rel_types.append('traits')
if want_version >= (1, 11):
rel_types.append('allocations')
for rel in rel_types:
links.append({'rel': rel, 'href': '%s/%s' % (url, rel)})
return links
def _serialize_provider(environ, resource_provider, want_version):
data = {
'uuid': resource_provider.uuid,
'name': resource_provider.name,
'generation': resource_provider.generation,
'links': _serialize_links(environ, resource_provider)
}
if want_version.matches((1, 14)):
data['parent_provider_uuid'] = resource_provider.parent_provider_uuid
data['root_provider_uuid'] = resource_provider.root_provider_uuid
return data
def _serialize_providers(environ, resource_providers, want_version):
output = []
last_modified = None
get_last_modified = want_version.matches((1, 15))
for provider in resource_providers:
if get_last_modified:
last_modified = util.pick_last_modified(last_modified, provider)
provider_data = _serialize_provider(environ, provider, want_version)
output.append(provider_data)
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return ({"resource_providers": output}, last_modified)
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def create_resource_provider(req):
"""POST to create a resource provider.
On success return a 201 response with an empty body
(microversions 1.0 - 1.19) or a 200 response with a
payload representing the newly created resource provider
(microversions 1.20 - latest), and a location header
pointing to the resource provider.
"""
context = req.environ['placement.context']
context.can(policies.CREATE)
schema = rp_schema.POST_RESOURCE_PROVIDER_SCHEMA
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
if want_version.matches((1, 14)):
schema = rp_schema.POST_RP_SCHEMA_V1_14
data = util.extract_json(req.body, schema)
try:
if data.get('uuid'):
# Normalize a UUID lacking proper dashes into the dashed
# {8}-{4}-{4}-{4}-{12} format
data['uuid'] = str(uuidlib.UUID(data['uuid']))
else:
data['uuid'] = uuidutils.generate_uuid()
resource_provider = rp_obj.ResourceProvider(context, **data)
resource_provider.create()
except db_exc.DBDuplicateEntry as exc:
# Whether exc.columns has one or two entries (in the event
# of both fields being duplicates) appears to be database
# dependent, so going with the complete solution here.
duplicate = ', '.join(['%s: %s' % (column, data[column])
for column in exc.columns])
raise webob.exc.HTTPConflict(
_('Conflicting resource provider %(duplicate)s already exists.') %
{'duplicate': duplicate},
comment=errors.DUPLICATE_NAME)
except exception.ObjectActionError as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to create resource provider "%(name)s", %(rp_uuid)s: '
'%(error)s') %
{'name': data['name'], 'rp_uuid': data['uuid'], 'error': exc})
req.response.location = util.resource_provider_url(
req.environ, resource_provider)
if want_version.matches(min_version=(1, 20)):
req.response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_provider(req.environ, resource_provider, want_version)))
req.response.content_type = 'application/json'
modified = util.pick_last_modified(None, resource_provider)
req.response.last_modified = modified
req.response.cache_control = 'no-cache'
else:
req.response.status = 201
req.response.content_type = None
return req.response
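# NOTE: Illustrative sketch, not part of the original module. It isolates the
# uuid normalization branch from create_resource_provider above; the input
# value is hypothetical. 'uuidlib' is the module-level alias imported above.
def _normalize_uuid_example():
    undashed = '30f0a5c0b7e211e9a2a32a2ae2dbcce4'
    # str(uuidlib.UUID(...)) re-inserts dashes in {8}-{4}-{4}-{4}-{12} form:
    # '30f0a5c0-b7e2-11e9-a2a3-2a2ae2dbcce4'
    return str(uuidlib.UUID(undashed))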
@wsgi_wrapper.PlacementWsgify
def delete_resource_provider(req):
"""DELETE to destroy a single resource provider.
On success return a 204 and an empty body.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
context.can(policies.DELETE)
# The containing application will catch a not found here.
try:
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
resource_provider.destroy()
except exception.ResourceProviderInUse as exc:
raise webob.exc.HTTPConflict(
_('Unable to delete resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc},
comment=errors.PROVIDER_IN_USE)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %s found for delete") % uuid)
except exception.CannotDeleteParentResourceProvider as exc:
raise webob.exc.HTTPConflict(
_("Unable to delete parent resource provider %(rp_uuid)s: "
"It has child resource providers.") % {'rp_uuid': uuid},
comment=errors.PROVIDER_CANNOT_DELETE_PARENT)
req.response.status = 204
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def get_resource_provider(req):
"""Get a single resource provider.
On success return a 200 with an application/json body representing
the resource provider.
"""
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
context.can(policies.SHOW)
# The containing application will catch a not found here.
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
response = req.response
response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_provider(req.environ, resource_provider, want_version)))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
modified = util.pick_last_modified(None, resource_provider)
response.last_modified = modified
response.cache_control = 'no-cache'
return response
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def list_resource_providers(req):
"""GET a list of resource providers.
On success return a 200 and an application/json body representing
a collection of resource providers.
"""
context = req.environ['placement.context']
context.can(policies.LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
schema = rp_schema.GET_RPS_SCHEMA_1_0
if want_version.matches((1, 18)):
schema = rp_schema.GET_RPS_SCHEMA_1_18
elif want_version.matches((1, 14)):
schema = rp_schema.GET_RPS_SCHEMA_1_14
elif want_version.matches((1, 4)):
schema = rp_schema.GET_RPS_SCHEMA_1_4
elif want_version.matches((1, 3)):
schema = rp_schema.GET_RPS_SCHEMA_1_3
allow_forbidden = want_version.matches((1, 22))
util.validate_query_params(req, schema)
filters = {}
# special handling of member_of qparam since we allow multiple member_of
# params at microversion 1.24.
if 'member_of' in req.GET:
filters['member_of'] = util.normalize_member_of_qs_params(req)
qpkeys = ('uuid', 'name', 'in_tree', 'resources', 'required')
for attr in qpkeys:
if attr in req.GET:
value = req.GET[attr]
if attr == 'resources':
value = util.normalize_resources_qs_param(value)
elif attr == 'required':
value = util.normalize_traits_qs_param(
value, allow_forbidden=allow_forbidden)
filters[attr] = value
try:
resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
context, filters)
except exception.ResourceClassNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid resource class in resources parameter: %(error)s') %
{'error': exc})
except exception.TraitNotFound as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid trait(s) in "required" parameter: %(error)s') %
{'error': exc})
response = req.response
output, last_modified = _serialize_providers(
req.environ, resource_providers, want_version)
response.body = encodeutils.to_utf8(jsonutils.dumps(output))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = last_modified
response.cache_control = 'no-cache'
return response
@wsgi_wrapper.PlacementWsgify
@util.require_content('application/json')
def update_resource_provider(req):
"""PUT to update a single resource provider.
On success return a 200 response with a representation of the updated
resource provider.
"""
uuid = util.wsgi_path_item(req.environ, 'uuid')
context = req.environ['placement.context']
context.can(policies.UPDATE)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# The containing application will catch a not found here.
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
schema = rp_schema.PUT_RESOURCE_PROVIDER_SCHEMA
if want_version.matches((1, 14)):
schema = rp_schema.PUT_RP_SCHEMA_V1_14
data = util.extract_json(req.body, schema)
for field in rp_obj.ResourceProvider.SETTABLE_FIELDS:
if field in data:
setattr(resource_provider, field, data[field])
try:
resource_provider.save()
except db_exc.DBDuplicateEntry as exc:
raise webob.exc.HTTPConflict(
_('Conflicting resource provider %(name)s already exists.') %
{'name': data['name']},
comment=errors.DUPLICATE_NAME)
except exception.ObjectActionError as exc:
raise webob.exc.HTTPBadRequest(
_('Unable to save resource provider %(rp_uuid)s: %(error)s') %
{'rp_uuid': uuid, 'error': exc})
response = req.response
response.status = 200
response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_provider(req.environ, resource_provider, want_version)))
response.content_type = 'application/json'
if want_version.matches((1, 15)):
response.last_modified = resource_provider.updated_at
response.cache_control = 'no-cache'
return response

View File

@ -1,54 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for the root of the Placement API."""
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import wsgi_wrapper
@wsgi_wrapper.PlacementWsgify
def home(req):
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
min_version = microversion.min_version_string()
max_version = microversion.max_version_string()
# NOTE(cdent): As sections of the api are added, links can be
# added to this output to align with the guidelines at
# http://specs.openstack.org/openstack/api-wg/guidelines/microversion_specification.html#version-discovery
version_data = {
'id': 'v%s' % min_version,
'max_version': max_version,
'min_version': min_version,
# for now there is only ever one version, so it must be CURRENT
'status': 'CURRENT',
'links': [{
# Point back to this same URL as the root of this version.
# NOTE(cdent): We explicitly want this to be a relative-URL
# representation of "this same URL", otherwise placement needs
# to keep track of proxy addresses and the like, which we have
# avoided thus far, in order to construct full URLs. Placement
# is much easier to scale if we never track that stuff.
'rel': 'self',
'href': '',
}],
}
version_json = jsonutils.dumps({'versions': [version_data]})
req.response.body = encodeutils.to_utf8(version_json)
req.response.content_type = 'application/json'
if want_version.matches((1, 15)):
req.response.cache_control = 'no-cache'
req.response.last_modified = timeutils.utcnow(with_timezone=True)
return req.response

View File

@ -1,270 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Traits handlers for Placement API."""
import jsonschema
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import trait as policies
from nova.api.openstack.placement.schemas import trait as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _normalize_traits_qs_param(qs):
try:
op, value = qs.split(':', 1)
except ValueError:
msg = _('Badly formatted name parameter. Expected name query string '
'parameter in form: '
'?name=[in|startswith]:[name1,name2|prefix]. Got: "%s"')
msg = msg % qs
raise webob.exc.HTTPBadRequest(msg)
filters = {}
if op == 'in':
filters['name_in'] = value.split(',')
elif op == 'startswith':
filters['prefix'] = value
return filters
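# NOTE: Illustrative sketch, not part of the original module, mapping the two
# supported 'name' query string forms to the filters dict returned by
# _normalize_traits_qs_param above. The trait names are hypothetical.
_EXAMPLE_NAME_QS_FILTERS = {
    'in:CUSTOM_GOLD,CUSTOM_SILVER': {'name_in': ['CUSTOM_GOLD', 'CUSTOM_SILVER']},
    'startswith:CUSTOM_': {'prefix': 'CUSTOM_'},
}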
def _serialize_traits(traits, want_version):
last_modified = None
get_last_modified = want_version.matches((1, 15))
trait_names = []
for trait in traits:
if get_last_modified:
last_modified = util.pick_last_modified(last_modified, trait)
trait_names.append(trait.name)
# If there were no traits, set last_modified to now
last_modified = last_modified or timeutils.utcnow(with_timezone=True)
return {'traits': trait_names}, last_modified
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def put_trait(req):
context = req.environ['placement.context']
context.can(policies.TRAITS_UPDATE)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
name = util.wsgi_path_item(req.environ, 'name')
try:
jsonschema.validate(name, schema.CUSTOM_TRAIT)
except jsonschema.ValidationError:
raise webob.exc.HTTPBadRequest(
_('The trait is invalid. A valid trait must be no longer than '
'255 characters, start with the prefix "CUSTOM_" and use '
'following characters: "A"-"Z", "0"-"9" and "_"'))
trait = rp_obj.Trait(context)
trait.name = name
try:
trait.create()
req.response.status = 201
except exception.TraitExists:
# Get the trait that already exists to get last-modified time.
if want_version.matches((1, 15)):
trait = rp_obj.Trait.get_by_name(context, name)
req.response.status = 204
req.response.content_type = None
req.response.location = util.trait_url(req.environ, trait)
if want_version.matches((1, 15)):
req.response.last_modified = trait.created_at
req.response.cache_control = 'no-cache'
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def get_trait(req):
context = req.environ['placement.context']
context.can(policies.TRAITS_SHOW)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
name = util.wsgi_path_item(req.environ, 'name')
try:
trait = rp_obj.Trait.get_by_name(context, name)
except exception.TraitNotFound as ex:
raise webob.exc.HTTPNotFound(ex.format_message())
req.response.status = 204
req.response.content_type = None
if want_version.matches((1, 15)):
req.response.last_modified = trait.created_at
req.response.cache_control = 'no-cache'
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def delete_trait(req):
context = req.environ['placement.context']
context.can(policies.TRAITS_DELETE)
name = util.wsgi_path_item(req.environ, 'name')
try:
trait = rp_obj.Trait.get_by_name(context, name)
trait.destroy()
except exception.TraitNotFound as ex:
raise webob.exc.HTTPNotFound(ex.format_message())
except exception.TraitCannotDeleteStandard as ex:
raise webob.exc.HTTPBadRequest(ex.format_message())
except exception.TraitInUse as ex:
raise webob.exc.HTTPConflict(ex.format_message())
req.response.status = 204
req.response.content_type = None
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
@util.check_accept('application/json')
def list_traits(req):
context = req.environ['placement.context']
context.can(policies.TRAITS_LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
filters = {}
util.validate_query_params(req, schema.LIST_TRAIT_SCHEMA)
if 'name' in req.GET:
filters = _normalize_traits_qs_param(req.GET['name'])
if 'associated' in req.GET:
if req.GET['associated'].lower() not in ['true', 'false']:
raise webob.exc.HTTPBadRequest(
_('The query parameter "associated" only accepts '
'"true" or "false"'))
filters['associated'] = (
True if req.GET['associated'].lower() == 'true' else False)
traits = rp_obj.TraitList.get_all(context, filters)
req.response.status = 200
output, last_modified = _serialize_traits(traits, want_version)
if want_version.matches((1, 15)):
req.response.last_modified = last_modified
req.response.cache_control = 'no-cache'
req.response.body = encodeutils.to_utf8(jsonutils.dumps(output))
req.response.content_type = 'application/json'
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
@util.check_accept('application/json')
def list_traits_for_resource_provider(req):
context = req.environ['placement.context']
context.can(policies.RP_TRAIT_LIST)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
uuid = util.wsgi_path_item(req.environ, 'uuid')
    # The resource provider object is needed for two things: if it is
    # not found we'll get a 404 here, which needs to happen because
    # get_all_by_resource_provider can return an empty list. It is also
    # needed for its generation, which is used in the outgoing
    # representation.
try:
rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %(uuid)s found: %(error)s") %
{'uuid': uuid, 'error': exc})
traits = rp_obj.TraitList.get_all_by_resource_provider(context, rp)
response_body, last_modified = _serialize_traits(traits, want_version)
response_body["resource_provider_generation"] = rp.generation
if want_version.matches((1, 15)):
req.response.last_modified = last_modified
req.response.cache_control = 'no-cache'
req.response.status = 200
req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
req.response.content_type = 'application/json'
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
@util.require_content('application/json')
def update_traits_for_resource_provider(req):
context = req.environ['placement.context']
context.can(policies.RP_TRAIT_UPDATE)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
uuid = util.wsgi_path_item(req.environ, 'uuid')
data = util.extract_json(req.body, schema.SET_TRAITS_FOR_RP_SCHEMA)
rp_gen = data['resource_provider_generation']
traits = data['traits']
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
if resource_provider.generation != rp_gen:
raise webob.exc.HTTPConflict(
_("Resource provider's generation already changed. Please update "
"the generation and try again."),
json_formatter=util.json_error_formatter,
comment=errors.CONCURRENT_UPDATE)
trait_objs = rp_obj.TraitList.get_all(
context, filters={'name_in': traits})
traits_name = set([obj.name for obj in trait_objs])
non_existed_trait = set(traits) - set(traits_name)
if non_existed_trait:
raise webob.exc.HTTPBadRequest(
_("No such trait %s") % ', '.join(non_existed_trait))
resource_provider.set_traits(trait_objs)
response_body, last_modified = _serialize_traits(trait_objs, want_version)
response_body[
'resource_provider_generation'] = resource_provider.generation
if want_version.matches((1, 15)):
req.response.last_modified = last_modified
req.response.cache_control = 'no-cache'
req.response.status = 200
req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
req.response.content_type = 'application/json'
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.6')
def delete_traits_for_resource_provider(req):
context = req.environ['placement.context']
context.can(policies.RP_TRAIT_DELETE)
uuid = util.wsgi_path_item(req.environ, 'uuid')
resource_provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
try:
resource_provider.set_traits(rp_obj.TraitList(objects=[]))
except exception.ConcurrentUpdateDetected as e:
raise webob.exc.HTTPConflict(e.format_message(),
comment=errors.CONCURRENT_UPDATE)
req.response.status = 204
req.response.content_type = None
return req.response

View File

@@ -1,120 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API handlers for usage information."""
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import webob
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import microversion
from nova.api.openstack.placement.objects import resource_provider as rp_obj
from nova.api.openstack.placement.policies import usage as policies
from nova.api.openstack.placement.schemas import usage as schema
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova.i18n import _
def _serialize_usages(resource_provider, usage):
usage_dict = {resource.resource_class: resource.usage
for resource in usage}
return {'resource_provider_generation': resource_provider.generation,
'usages': usage_dict}
@wsgi_wrapper.PlacementWsgify
@util.check_accept('application/json')
def list_usages(req):
"""GET a dictionary of resource provider usage by resource class.
If the resource provider does not exist return a 404.
On success return a 200 with an application/json representation of
the usage dictionary.
"""
context = req.environ['placement.context']
context.can(policies.PROVIDER_USAGES)
uuid = util.wsgi_path_item(req.environ, 'uuid')
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
    # The resource provider object is needed for two things: if it is
    # not found we'll get a 404 here, which needs to happen because
    # get_all_by_resource_provider_uuid can return an empty list. It is
    # also needed for its generation, which is used in the outgoing
    # representation.
try:
resource_provider = rp_obj.ResourceProvider.get_by_uuid(
context, uuid)
except exception.NotFound as exc:
raise webob.exc.HTTPNotFound(
_("No resource provider with uuid %(uuid)s found: %(error)s") %
{'uuid': uuid, 'error': exc})
usage = rp_obj.UsageList.get_all_by_resource_provider_uuid(
context, uuid)
response = req.response
response.body = encodeutils.to_utf8(jsonutils.dumps(
_serialize_usages(resource_provider, usage)))
req.response.content_type = 'application/json'
if want_version.matches((1, 15)):
req.response.cache_control = 'no-cache'
# While it would be possible to generate a last-modified time
# based on the collection of allocations that result in a usage
# value (with some spelunking in the SQL) that doesn't align with
# the question that is being asked in a request for usages: What
# is the usage, now? So the last-modified time is set to utcnow.
req.response.last_modified = timeutils.utcnow(with_timezone=True)
return req.response
@wsgi_wrapper.PlacementWsgify
@microversion.version_handler('1.9')
@util.check_accept('application/json')
def get_total_usages(req):
"""GET the sum of usages for a project or a project/user.
On success return a 200 and an application/json body representing the
sum/total of usages.
Return 404 Not Found if the wanted microversion does not match.
"""
context = req.environ['placement.context']
# TODO(mriedem): When we support non-admins to use GET /usages we
# should pass the project_id (and user_id?) from the query parameters
# into context.can() for the target.
context.can(policies.TOTAL_USAGES)
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
util.validate_query_params(req, schema.GET_USAGES_SCHEMA_1_9)
project_id = req.GET.get('project_id')
user_id = req.GET.get('user_id')
usages = rp_obj.UsageList.get_all_by_project_user(context, project_id,
user_id=user_id)
response = req.response
usages_dict = {'usages': {resource.resource_class: resource.usage
for resource in usages}}
response.body = encodeutils.to_utf8(jsonutils.dumps(usages_dict))
req.response.content_type = 'application/json'
if want_version.matches((1, 15)):
req.response.cache_control = 'no-cache'
# While it would be possible to generate a last-modified time
# based on the collection of allocations that result in a usage
# value (with some spelunking in the SQL) that doesn't align with
# the question that is being asked in a request for usages: What
# is the usage, now? So the last-modified time is set to utcnow.
req.response.last_modified = timeutils.utcnow(with_timezone=True)
return req.response
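# NOTE: an illustrative request/response sketch (identifiers and values are
# made up) for the handler above:
#
#   GET /usages?project_id={project_id}&user_id={user_id}   (microversion >= 1.9)
#   # -> 200 {'usages': {'VCPU': 4, 'MEMORY_MB': 8192, 'DISK_GB': 100}}
#
# At microversion 1.15 or later the response also carries a last-modified
# header set to utcnow and cache-control: no-cache, as coded above.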

View File

@@ -1,53 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Symbols intended to be imported by both placement code and placement API
consumers. When placement is separated out, this module should be part of a
common library that both placement and its consumers can require."""
class RequestGroup(object):
def __init__(self, use_same_provider=True, resources=None,
required_traits=None, forbidden_traits=None, member_of=None):
"""Create a grouping of resource and trait requests.
:param use_same_provider:
If True, (the default) this RequestGroup represents requests for
resources and traits which must be satisfied by a single resource
provider. If False, represents a request for resources and traits
in any resource provider in the same tree, or a sharing provider.
:param resources: A dict of { resource_class: amount, ... }
:param required_traits: A set of { trait_name, ... }
:param forbidden_traits: A set of { trait_name, ... }
:param member_of: A list of [ [aggregate_UUID],
[aggregate_UUID, aggregate_UUID] ... ]
"""
self.use_same_provider = use_same_provider
self.resources = resources or {}
self.required_traits = required_traits or set()
self.forbidden_traits = forbidden_traits or set()
self.member_of = member_of or []
def __str__(self):
ret = 'RequestGroup(use_same_provider=%s' % str(self.use_same_provider)
ret += ', resources={%s}' % ', '.join(
'%s:%d' % (rc, amount)
for rc, amount in sorted(list(self.resources.items())))
ret += ', traits=[%s]' % ', '.join(
sorted(self.required_traits) +
['!%s' % ft for ft in sorted(self.forbidden_traits)])
ret += ', aggregates=[%s]' % ', '.join(
sorted('[%s]' % ', '.join(agglist)
for agglist in sorted(self.member_of)))
ret += ')'
return ret
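# NOTE: a hedged usage sketch (all values invented) showing the string form
# produced by __str__ above:
#
#   rg = RequestGroup(
#       use_same_provider=False,
#       resources={'VCPU': 2, 'DISK_GB': 10},
#       required_traits={'HW_CPU_X86_AVX'},
#       forbidden_traits={'CUSTOM_SLOW'},
#       member_of=[['agg1', 'agg2']])
#   str(rg)
#   # -> "RequestGroup(use_same_provider=False, resources={DISK_GB:10, VCPU:2},
#   #     traits=[HW_CPU_X86_AVX, !CUSTOM_SLOW], aggregates=[[agg1, agg2]])"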

View File

@@ -1,172 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Microversion handling."""
# NOTE(cdent): This code is taken from enamel:
# https://github.com/jaypipes/enamel and was the original source of
# the code now used in microversion_parse library.
import collections
import inspect
import microversion_parse
import webob
SERVICE_TYPE = 'placement'
MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE
VERSIONED_METHODS = collections.defaultdict(list)
# The Canonical Version List
VERSIONS = [
'1.0',
'1.1', # initial support for aggregate.get_aggregates and set_aggregates
'1.2', # Adds /resource_classes resource endpoint
'1.3', # Adds 'member_of' query parameter to get resource providers
# that are members of any of the listed aggregates
'1.4', # Adds resources query string parameter in GET /resource_providers
'1.5', # Adds DELETE /resource_providers/{uuid}/inventories
'1.6', # Adds /traits and /resource_providers{uuid}/traits resource
# endpoints
'1.7', # PUT /resource_classes/{name} is bodiless create or update
'1.8', # Adds 'project_id' and 'user_id' required request parameters to
# PUT /allocations
'1.9', # Adds GET /usages
'1.10', # Adds GET /allocation_candidates resource endpoint
'1.11', # Adds 'allocations' link to the GET /resource_providers response
'1.12', # Add project_id and user_id to GET /allocations/{consumer_uuid}
# and PUT to /allocations/{consumer_uuid} in the same dict form
# as GET. The 'allocation_requests' format in GET
# /allocation_candidates is updated to be the same as well.
'1.13', # Adds POST /allocations to set allocations for multiple consumers
'1.14', # Adds parent and root provider UUID on resource provider
# representation and 'in_tree' filter on GET /resource_providers
'1.15', # Include last-modified and cache-control headers
'1.16', # Add 'limit' query parameter to GET /allocation_candidates
'1.17', # Add 'required' query parameter to GET /allocation_candidates and
# return traits in the provider summary.
'1.18', # Support ?required=<traits> queryparam on GET /resource_providers
'1.19', # Include generation and conflict detection in provider aggregates
# APIs
'1.20', # Return 200 with provider payload from POST /resource_providers
'1.21', # Support ?member_of=in:<agg UUIDs> queryparam on
# GET /allocation_candidates
'1.22', # Support forbidden traits in the required parameter of
# GET /resource_providers and GET /allocation_candidates
'1.23', # Add support for error codes in error response JSON
'1.24', # Support multiple ?member_of=<agg UUIDs> queryparams on
# GET /resource_providers
'1.25', # Adds support for granular resource requests via numbered
# querystring groups in GET /allocation_candidates
'1.26', # Add ability to specify inventory with reserved value equal to
# total.
'1.27', # Include all resource class inventories in `provider_summaries`
# field in response of `GET /allocation_candidates` API even if
# the resource class is not in the requested resources.
'1.28', # Add support for consumer generation
'1.29', # Support nested providers in GET /allocation_candidates API.
'1.30', # Add POST /reshaper for atomically migrating resource provider
# inventories and allocations.
]
def max_version_string():
return VERSIONS[-1]
def min_version_string():
return VERSIONS[0]
# From twisted
# https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py
def _fully_qualified_name(obj):
"""Return the fully qualified name of a module, class, method or function.
Classes and functions need to be module level ones to be correctly
qualified.
"""
try:
name = obj.__qualname__
except AttributeError:
name = obj.__name__
if inspect.isclass(obj) or inspect.isfunction(obj):
moduleName = obj.__module__
return "%s.%s" % (moduleName, name)
elif inspect.ismethod(obj):
try:
cls = obj.im_class
except AttributeError:
# Python 3 eliminates im_class, substitutes __module__ and
# __qualname__ to provide similar information.
return "%s.%s" % (obj.__module__, obj.__qualname__)
else:
className = _fully_qualified_name(cls)
return "%s.%s" % (className, name)
return name
def _find_method(f, version, status_code):
"""Look in VERSIONED_METHODS for method with right name matching version.
If no match is found a HTTPError corresponding to status_code will
be returned.
"""
qualified_name = _fully_qualified_name(f)
# A KeyError shouldn't be possible here, but let's be robust
# just in case.
method_list = VERSIONED_METHODS.get(qualified_name, [])
for min_version, max_version, func in method_list:
if min_version <= version <= max_version:
return func
raise webob.exc.status_map[status_code]
def version_handler(min_ver, max_ver=None, status_code=404):
"""Decorator for versioning API methods.
Add as a decorator to a placement API handler to constrain
the microversions at which it will run. Add after the
``wsgify`` decorator.
This does not check for version intersections. That's the
domain of tests.
:param min_ver: A string of two numerals, X.Y indicating the
minimum version allowed for the decorated method.
:param max_ver: A string of two numerals, X.Y, indicating the
maximum version allowed for the decorated method.
:param status_code: A status code to indicate error, 404 by default
"""
def decorator(f):
min_version = microversion_parse.parse_version_string(min_ver)
if max_ver:
max_version = microversion_parse.parse_version_string(max_ver)
else:
max_version = microversion_parse.parse_version_string(
max_version_string())
qualified_name = _fully_qualified_name(f)
VERSIONED_METHODS[qualified_name].append(
(min_version, max_version, f))
def decorated_func(req, *args, **kwargs):
version = req.environ[MICROVERSION_ENVIRON]
return _find_method(f, version, status_code)(req, *args, **kwargs)
# Sort highest min version to beginning of list.
VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0],
reverse=True)
return decorated_func
return decorator
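# NOTE: a hypothetical sketch (handler name and version window invented, not
# taken from the original tree) of how the decorator is applied:
#
#   @wsgi_wrapper.PlacementWsgify
#   @version_handler('1.14', '1.29')
#   def get_widget(req):
#       ...
#
# At request time decorated_func reads the parsed version from
# req.environ[MICROVERSION_ENVIRON]; _find_method then returns the registered
# function whose (min_version, max_version) range contains it, or raises the
# webob error mapped from status_code (404 by default).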

View File

@@ -1,257 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import sqlalchemy as sa
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.db.sqlalchemy import api_models as models
CONSUMER_TBL = models.Consumer.__table__
_ALLOC_TBL = models.Allocation.__table__
@db_api.placement_context_manager.writer
def create_incomplete_consumers(ctx, batch_size):
"""Finds all the consumer records that are missing for allocations and
creates consumer records for them, using the "incomplete consumer" project
and user CONF options.
Returns a tuple containing two identical elements with the number of
consumer records created, since this is the expected return format for data
migration routines.
"""
# Create a record in the projects table for our incomplete project
incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
# Create a record in the users table for our incomplete user
incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
# Create a consumer table record for all consumers where
# allocations.consumer_id doesn't exist in the consumers table. Use the
# incomplete consumer project and user ID.
alloc_to_consumer = sa.outerjoin(
_ALLOC_TBL, CONSUMER_TBL,
_ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
cols = [
_ALLOC_TBL.c.consumer_id,
incomplete_proj_id,
incomplete_user_id,
]
sel = sa.select(cols)
sel = sel.select_from(alloc_to_consumer)
sel = sel.where(CONSUMER_TBL.c.id.is_(None))
# NOTE(mnaser): It is possible to have multiple consumers having many
# allocations to the same resource provider, which would
# make the INSERT FROM SELECT fail due to duplicates.
sel = sel.group_by(_ALLOC_TBL.c.consumer_id)
sel = sel.limit(batch_size)
target_cols = ['uuid', 'project_id', 'user_id']
ins_stmt = CONSUMER_TBL.insert().from_select(target_cols, sel)
res = ctx.session.execute(ins_stmt)
return res.rowcount, res.rowcount
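# NOTE: roughly the SQL the statement above produces (exact rendering depends
# on the SQLAlchemy backend; shown only as an illustration):
#
#   INSERT INTO consumers (uuid, project_id, user_id)
#   SELECT allocations.consumer_id, :incomplete_proj_id, :incomplete_user_id
#   FROM allocations
#   LEFT OUTER JOIN consumers ON allocations.consumer_id = consumers.uuid
#   WHERE consumers.id IS NULL
#   GROUP BY allocations.consumer_id
#   LIMIT :batch_size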
@db_api.placement_context_manager.writer
def delete_consumers_if_no_allocations(ctx, consumer_uuids):
"""Looks to see if any of the supplied consumers has any allocations and if
not, deletes the consumer record entirely.
:param ctx: `nova.api.openstack.placement.context.RequestContext` that
contains an oslo_db Session
:param consumer_uuids: UUIDs of the consumers to check and maybe delete
"""
# Delete consumers that are not referenced in the allocations table
cons_to_allocs_join = sa.outerjoin(
CONSUMER_TBL, _ALLOC_TBL,
CONSUMER_TBL.c.uuid == _ALLOC_TBL.c.consumer_id)
subq = sa.select([CONSUMER_TBL.c.uuid]).select_from(cons_to_allocs_join)
subq = subq.where(sa.and_(
_ALLOC_TBL.c.consumer_id.is_(None),
CONSUMER_TBL.c.uuid.in_(consumer_uuids)))
no_alloc_consumers = [r[0] for r in ctx.session.execute(subq).fetchall()]
del_stmt = CONSUMER_TBL.delete()
del_stmt = del_stmt.where(CONSUMER_TBL.c.uuid.in_(no_alloc_consumers))
ctx.session.execute(del_stmt)
@db_api.placement_context_manager.reader
def _get_consumer_by_uuid(ctx, uuid):
# The SQL for this looks like the following:
# SELECT
# c.id, c.uuid,
# p.id AS project_id, p.external_id AS project_external_id,
# u.id AS user_id, u.external_id AS user_external_id,
# c.updated_at, c.created_at
# FROM consumers c
# INNER JOIN projects p
# ON c.project_id = p.id
# INNER JOIN users u
# ON c.user_id = u.id
# WHERE c.uuid = $uuid
consumers = sa.alias(CONSUMER_TBL, name="c")
projects = sa.alias(project_obj.PROJECT_TBL, name="p")
users = sa.alias(user_obj.USER_TBL, name="u")
cols = [
consumers.c.id,
consumers.c.uuid,
projects.c.id.label("project_id"),
projects.c.external_id.label("project_external_id"),
users.c.id.label("user_id"),
users.c.external_id.label("user_external_id"),
consumers.c.generation,
consumers.c.updated_at,
consumers.c.created_at
]
c_to_p_join = sa.join(
consumers, projects, consumers.c.project_id == projects.c.id)
c_to_u_join = sa.join(
c_to_p_join, users, consumers.c.user_id == users.c.id)
sel = sa.select(cols).select_from(c_to_u_join)
sel = sel.where(consumers.c.uuid == uuid)
res = ctx.session.execute(sel).fetchone()
if not res:
raise exception.ConsumerNotFound(uuid=uuid)
return dict(res)
@db_api.placement_context_manager.writer
def _increment_consumer_generation(ctx, consumer):
"""Increments the supplied consumer's generation value, supplying the
consumer object which contains the currently-known generation. Returns the
newly-incremented generation.
:param ctx: `nova.context.RequestContext` that contains an oslo_db Session
:param consumer: `Consumer` whose generation should be updated.
:returns: The newly-incremented generation.
:raises nova.exception.ConcurrentUpdateDetected: if another thread updated
the same consumer's view of its allocations in between the time
when this object was originally read and the call which modified
the consumer's state (e.g. replacing allocations for a consumer)
"""
consumer_gen = consumer.generation
new_generation = consumer_gen + 1
upd_stmt = CONSUMER_TBL.update().where(sa.and_(
CONSUMER_TBL.c.id == consumer.id,
CONSUMER_TBL.c.generation == consumer_gen)).values(
generation=new_generation)
res = ctx.session.execute(upd_stmt)
if res.rowcount != 1:
raise exception.ConcurrentUpdateDetected
return new_generation
@db_api.placement_context_manager.writer
def _delete_consumer(ctx, consumer):
"""Deletes the supplied consumer.
:param ctx: `nova.context.RequestContext` that contains an oslo_db Session
    :param consumer: `Consumer` to delete.
"""
del_stmt = CONSUMER_TBL.delete().where(CONSUMER_TBL.c.id == consumer.id)
ctx.session.execute(del_stmt)
@base.VersionedObjectRegistry.register_if(False)
class Consumer(base.VersionedObject, base.TimestampedObject):
fields = {
'id': fields.IntegerField(read_only=True),
'uuid': fields.UUIDField(nullable=False),
'project': fields.ObjectField('Project', nullable=False),
'user': fields.ObjectField('User', nullable=False),
'generation': fields.IntegerField(nullable=False),
}
@staticmethod
def _from_db_object(ctx, target, source):
target.id = source['id']
target.uuid = source['uuid']
target.generation = source['generation']
target.created_at = source['created_at']
target.updated_at = source['updated_at']
target.project = project_obj.Project(
ctx, id=source['project_id'],
external_id=source['project_external_id'])
target.user = user_obj.User(
ctx, id=source['user_id'],
external_id=source['user_external_id'])
target._context = ctx
target.obj_reset_changes()
return target
@classmethod
def get_by_uuid(cls, ctx, uuid):
res = _get_consumer_by_uuid(ctx, uuid)
return cls._from_db_object(ctx, cls(ctx), res)
def create(self):
@db_api.placement_context_manager.writer
def _create_in_db(ctx):
db_obj = models.Consumer(
uuid=self.uuid, project_id=self.project.id,
user_id=self.user.id)
try:
db_obj.save(ctx.session)
# NOTE(jaypipes): We don't do the normal _from_db_object()
# thing here because models.Consumer doesn't have a
# project_external_id or user_external_id attribute.
self.id = db_obj.id
self.generation = db_obj.generation
except db_exc.DBDuplicateEntry:
raise exception.ConsumerExists(uuid=self.uuid)
_create_in_db(self._context)
self.obj_reset_changes()
def update(self):
"""Used to update the consumer's project and user information without
incrementing the consumer's generation.
"""
@db_api.placement_context_manager.writer
def _update_in_db(ctx):
upd_stmt = CONSUMER_TBL.update().values(
project_id=self.project.id, user_id=self.user.id)
            # NOTE(jaypipes): We add the generation check to the WHERE clause
            # below just for safety. We don't need to check that the statement
            # actually updated a single row. If it did not, then the
            # consumer.increment_generation() call that happens in
            # AllocationList.replace_all() will end up raising
            # ConcurrentUpdateDetected anyway.
upd_stmt = upd_stmt.where(sa.and_(
CONSUMER_TBL.c.id == self.id,
CONSUMER_TBL.c.generation == self.generation))
ctx.session.execute(upd_stmt)
_update_in_db(self._context)
self.obj_reset_changes()
def increment_generation(self):
"""Increments the consumer's generation.
:raises nova.exception.ConcurrentUpdateDetected: if another thread
updated the same consumer's view of its allocations in between the
time when this object was originally read and the call which
modified the consumer's state (e.g. replacing allocations for a
consumer)
"""
self.generation = _increment_consumer_generation(self._context, self)
def delete(self):
_delete_consumer(self._context, self)

View File

@@ -1,92 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import sqlalchemy as sa
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import exception
from nova.db.sqlalchemy import api_models as models
CONF = cfg.CONF
PROJECT_TBL = models.Project.__table__
@db_api.placement_context_manager.writer
def ensure_incomplete_project(ctx):
"""Ensures that a project record is created for the "incomplete consumer
project". Returns the internal ID of that record.
"""
incomplete_id = CONF.placement.incomplete_consumer_project_id
sel = sa.select([PROJECT_TBL.c.id]).where(
PROJECT_TBL.c.external_id == incomplete_id)
res = ctx.session.execute(sel).fetchone()
if res:
return res[0]
ins = PROJECT_TBL.insert().values(external_id=incomplete_id)
res = ctx.session.execute(ins)
return res.inserted_primary_key[0]
@db_api.placement_context_manager.reader
def _get_project_by_external_id(ctx, external_id):
projects = sa.alias(PROJECT_TBL, name="p")
cols = [
projects.c.id,
projects.c.external_id,
projects.c.updated_at,
projects.c.created_at
]
sel = sa.select(cols)
sel = sel.where(projects.c.external_id == external_id)
res = ctx.session.execute(sel).fetchone()
if not res:
raise exception.ProjectNotFound(external_id=external_id)
return dict(res)
@base.VersionedObjectRegistry.register_if(False)
class Project(base.VersionedObject):
fields = {
'id': fields.IntegerField(read_only=True),
'external_id': fields.StringField(nullable=False),
}
@staticmethod
def _from_db_object(ctx, target, source):
for field in target.fields:
setattr(target, field, source[field])
target._context = ctx
target.obj_reset_changes()
return target
@classmethod
def get_by_external_id(cls, ctx, external_id):
res = _get_project_by_external_id(ctx, external_id)
return cls._from_db_object(ctx, cls(ctx), res)
def create(self):
@db_api.placement_context_manager.writer
def _create_in_db(ctx):
db_obj = models.Project(external_id=self.external_id)
try:
db_obj.save(ctx.session)
except db_exc.DBDuplicateEntry:
raise exception.ProjectExists(external_id=self.external_id)
self._from_db_object(ctx, self, db_obj)
_create_in_db(self._context)

File diff suppressed because it is too large

View File

@@ -1,92 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
import sqlalchemy as sa
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import exception
from nova.db.sqlalchemy import api_models as models
CONF = cfg.CONF
USER_TBL = models.User.__table__
@db_api.placement_context_manager.writer
def ensure_incomplete_user(ctx):
"""Ensures that a user record is created for the "incomplete consumer
user". Returns the internal ID of that record.
"""
incomplete_id = CONF.placement.incomplete_consumer_user_id
sel = sa.select([USER_TBL.c.id]).where(
USER_TBL.c.external_id == incomplete_id)
res = ctx.session.execute(sel).fetchone()
if res:
return res[0]
ins = USER_TBL.insert().values(external_id=incomplete_id)
res = ctx.session.execute(ins)
return res.inserted_primary_key[0]
@db_api.placement_context_manager.reader
def _get_user_by_external_id(ctx, external_id):
users = sa.alias(USER_TBL, name="u")
cols = [
users.c.id,
users.c.external_id,
users.c.updated_at,
users.c.created_at
]
sel = sa.select(cols)
sel = sel.where(users.c.external_id == external_id)
res = ctx.session.execute(sel).fetchone()
if not res:
raise exception.UserNotFound(external_id=external_id)
return dict(res)
@base.VersionedObjectRegistry.register_if(False)
class User(base.VersionedObject):
fields = {
'id': fields.IntegerField(read_only=True),
'external_id': fields.StringField(nullable=False),
}
@staticmethod
def _from_db_object(ctx, target, source):
for field in target.fields:
setattr(target, field, source[field])
target._context = ctx
target.obj_reset_changes()
return target
@classmethod
def get_by_external_id(cls, ctx, external_id):
res = _get_user_by_external_id(ctx, external_id)
return cls._from_db_object(ctx, cls(ctx), res)
def create(self):
@db_api.placement_context_manager.writer
def _create_in_db(ctx):
db_obj = models.User(external_id=self.external_id)
try:
db_obj.save(ctx.session)
except db_exc.DBDuplicateEntry:
raise exception.UserExists(external_id=self.external_id)
self._from_db_object(ctx, self, db_obj)
_create_in_db(self._context)

View File

@@ -1,39 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from nova.api.openstack.placement.policies import aggregate
from nova.api.openstack.placement.policies import allocation
from nova.api.openstack.placement.policies import allocation_candidate
from nova.api.openstack.placement.policies import base
from nova.api.openstack.placement.policies import inventory
from nova.api.openstack.placement.policies import reshaper
from nova.api.openstack.placement.policies import resource_class
from nova.api.openstack.placement.policies import resource_provider
from nova.api.openstack.placement.policies import trait
from nova.api.openstack.placement.policies import usage
def list_rules():
return itertools.chain(
base.list_rules(),
resource_provider.list_rules(),
resource_class.list_rules(),
inventory.list_rules(),
aggregate.list_rules(),
usage.list_rules(),
trait.list_rules(),
allocation.list_rules(),
allocation_candidate.list_rules(),
reshaper.list_rules(),
)

View File

@@ -1,53 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
PREFIX = 'placement:resource_providers:aggregates:%s'
LIST = PREFIX % 'list'
UPDATE = PREFIX % 'update'
BASE_PATH = '/resource_providers/{uuid}/aggregates'
rules = [
policy.DocumentedRuleDefault(
LIST,
base.RULE_ADMIN_API,
"List resource provider aggregates.",
[
{
'method': 'GET',
'path': BASE_PATH
}
],
scope_types=['system']
),
policy.DocumentedRuleDefault(
UPDATE,
base.RULE_ADMIN_API,
"Update resource provider aggregates.",
[
{
'method': 'PUT',
'path': BASE_PATH
}
],
scope_types=['system']
),
]
def list_rules():
return rules

View File

@@ -1,92 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
RP_ALLOC_LIST = 'placement:resource_providers:allocations:list'
ALLOC_PREFIX = 'placement:allocations:%s'
ALLOC_LIST = ALLOC_PREFIX % 'list'
ALLOC_MANAGE = ALLOC_PREFIX % 'manage'
ALLOC_UPDATE = ALLOC_PREFIX % 'update'
ALLOC_DELETE = ALLOC_PREFIX % 'delete'
rules = [
policy.DocumentedRuleDefault(
ALLOC_MANAGE,
base.RULE_ADMIN_API,
"Manage allocations.",
[
{
'method': 'POST',
'path': '/allocations'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
ALLOC_LIST,
base.RULE_ADMIN_API,
"List allocations.",
[
{
'method': 'GET',
'path': '/allocations/{consumer_uuid}'
}
],
scope_types=['system']
),
policy.DocumentedRuleDefault(
ALLOC_UPDATE,
base.RULE_ADMIN_API,
"Update allocations.",
[
{
'method': 'PUT',
'path': '/allocations/{consumer_uuid}'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
ALLOC_DELETE,
base.RULE_ADMIN_API,
"Delete allocations.",
[
{
'method': 'DELETE',
'path': '/allocations/{consumer_uuid}'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
RP_ALLOC_LIST,
base.RULE_ADMIN_API,
"List resource provider allocations.",
[
{
'method': 'GET',
'path': '/resource_providers/{uuid}/allocations'
}
],
scope_types=['system'],
),
]
def list_rules():
return rules

View File

@@ -1,38 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
LIST = 'placement:allocation_candidates:list'
rules = [
policy.DocumentedRuleDefault(
LIST,
base.RULE_ADMIN_API,
"List allocation candidates.",
[
{
'method': 'GET',
'path': '/allocation_candidates'
}
],
scope_types=['system'],
)
]
def list_rules():
return rules

View File

@@ -1,42 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
RULE_ADMIN_API = 'rule:admin_api'
rules = [
# "placement" is the default rule (action) used for all routes that do
# not yet have granular policy rules. It is used in
# PlacementHandler.__call__ and can be dropped once all routes have
# granular policy handling.
policy.RuleDefault(
"placement",
"role:admin",
description="This rule is used for all routes that do not yet "
"have granular policy rules. It will be replaced "
"with rule:admin_api.",
deprecated_for_removal=True,
deprecated_reason="This was a catch-all rule hard-coded into "
"the placement service and has been superseded by "
"granular policy rules per operation.",
deprecated_since="18.0.0"),
policy.RuleDefault(
"admin_api",
"role:admin",
description="Default rule for most placement APIs.",
scope_types=['system']),
]
def list_rules():
return rules
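# NOTE: a hedged illustration of how the two defaults above might surface in a
# generated sample policy file (the exact generator output is paraphrased):
#
#   # This rule is used for all routes that do not yet have granular policy
#   # rules. ... (DEPRECATED)
#   #"placement": "role:admin"
#
#   # Default rule for most placement APIs.
#   #"admin_api": "role:admin"
#
# The granular rule modules then reference the second entry through
# base.RULE_ADMIN_API, which is the literal string 'rule:admin_api'.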

View File

@@ -1,95 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
PREFIX = 'placement:resource_providers:inventories:%s'
LIST = PREFIX % 'list'
CREATE = PREFIX % 'create'
SHOW = PREFIX % 'show'
UPDATE = PREFIX % 'update'
DELETE = PREFIX % 'delete'
BASE_PATH = '/resource_providers/{uuid}/inventories'
rules = [
policy.DocumentedRuleDefault(
LIST,
base.RULE_ADMIN_API,
"List resource provider inventories.",
[
{
'method': 'GET',
'path': BASE_PATH
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
CREATE,
base.RULE_ADMIN_API,
"Create one resource provider inventory.",
[
{
'method': 'POST',
'path': BASE_PATH
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
SHOW,
base.RULE_ADMIN_API,
"Show resource provider inventory.",
[
{
'method': 'GET',
'path': BASE_PATH + '/{resource_class}'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
UPDATE,
base.RULE_ADMIN_API,
"Update resource provider inventory.",
[
{
'method': 'PUT',
'path': BASE_PATH
},
{
'method': 'PUT',
'path': BASE_PATH + '/{resource_class}'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
DELETE,
base.RULE_ADMIN_API,
"Delete resource provider inventory.",
[
{
'method': 'DELETE',
'path': BASE_PATH
},
{
'method': 'DELETE',
'path': BASE_PATH + '/{resource_class}'
}
],
scope_types=['system']),
]
def list_rules():
return rules

View File

@@ -1,38 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
PREFIX = 'placement:reshaper:%s'
RESHAPE = PREFIX % 'reshape'
rules = [
policy.DocumentedRuleDefault(
RESHAPE,
base.RULE_ADMIN_API,
"Reshape Inventory and Allocations.",
[
{
'method': 'POST',
'path': '/reshaper'
}
],
scope_types=['system']),
]
def list_rules():
return rules

View File

@@ -1,86 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
PREFIX = 'placement:resource_classes:%s'
LIST = PREFIX % 'list'
CREATE = PREFIX % 'create'
SHOW = PREFIX % 'show'
UPDATE = PREFIX % 'update'
DELETE = PREFIX % 'delete'
rules = [
policy.DocumentedRuleDefault(
LIST,
base.RULE_ADMIN_API,
"List resource classes.",
[
{
'method': 'GET',
'path': '/resource_classes'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
CREATE,
base.RULE_ADMIN_API,
"Create resource class.",
[
{
'method': 'POST',
'path': '/resource_classes'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
SHOW,
base.RULE_ADMIN_API,
"Show resource class.",
[
{
'method': 'GET',
'path': '/resource_classes/{name}'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
UPDATE,
base.RULE_ADMIN_API,
"Update resource class.",
[
{
'method': 'PUT',
'path': '/resource_classes/{name}'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
DELETE,
base.RULE_ADMIN_API,
"Delete resource class.",
[
{
'method': 'DELETE',
'path': '/resource_classes/{name}'
}
],
scope_types=['system']),
]
def list_rules():
return rules

View File

@@ -1,86 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
PREFIX = 'placement:resource_providers:%s'
LIST = PREFIX % 'list'
CREATE = PREFIX % 'create'
SHOW = PREFIX % 'show'
UPDATE = PREFIX % 'update'
DELETE = PREFIX % 'delete'
rules = [
policy.DocumentedRuleDefault(
LIST,
base.RULE_ADMIN_API,
"List resource providers.",
[
{
'method': 'GET',
'path': '/resource_providers'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
CREATE,
base.RULE_ADMIN_API,
"Create resource provider.",
[
{
'method': 'POST',
'path': '/resource_providers'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
SHOW,
base.RULE_ADMIN_API,
"Show resource provider.",
[
{
'method': 'GET',
'path': '/resource_providers/{uuid}'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
UPDATE,
base.RULE_ADMIN_API,
"Update resource provider.",
[
{
'method': 'PUT',
'path': '/resource_providers/{uuid}'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
DELETE,
base.RULE_ADMIN_API,
"Delete resource provider.",
[
{
'method': 'DELETE',
'path': '/resource_providers/{uuid}'
}
],
scope_types=['system']),
]
def list_rules():
return rules

View File

@@ -1,120 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
RP_TRAIT_PREFIX = 'placement:resource_providers:traits:%s'
RP_TRAIT_LIST = RP_TRAIT_PREFIX % 'list'
RP_TRAIT_UPDATE = RP_TRAIT_PREFIX % 'update'
RP_TRAIT_DELETE = RP_TRAIT_PREFIX % 'delete'
TRAITS_PREFIX = 'placement:traits:%s'
TRAITS_LIST = TRAITS_PREFIX % 'list'
TRAITS_SHOW = TRAITS_PREFIX % 'show'
TRAITS_UPDATE = TRAITS_PREFIX % 'update'
TRAITS_DELETE = TRAITS_PREFIX % 'delete'
rules = [
policy.DocumentedRuleDefault(
TRAITS_LIST,
base.RULE_ADMIN_API,
"List traits.",
[
{
'method': 'GET',
'path': '/traits'
}
],
scope_types=['system']
),
policy.DocumentedRuleDefault(
TRAITS_SHOW,
base.RULE_ADMIN_API,
"Show trait.",
[
{
'method': 'GET',
'path': '/traits/{name}'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
TRAITS_UPDATE,
base.RULE_ADMIN_API,
"Update trait.",
[
{
'method': 'PUT',
'path': '/traits/{name}'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
TRAITS_DELETE,
base.RULE_ADMIN_API,
"Delete trait.",
[
{
'method': 'DELETE',
'path': '/traits/{name}'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
RP_TRAIT_LIST,
base.RULE_ADMIN_API,
"List resource provider traits.",
[
{
'method': 'GET',
'path': '/resource_providers/{uuid}/traits'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
RP_TRAIT_UPDATE,
base.RULE_ADMIN_API,
"Update resource provider traits.",
[
{
'method': 'PUT',
'path': '/resource_providers/{uuid}/traits'
}
],
scope_types=['system'],
),
policy.DocumentedRuleDefault(
RP_TRAIT_DELETE,
base.RULE_ADMIN_API,
"Delete resource provider traits.",
[
{
'method': 'DELETE',
'path': '/resource_providers/{uuid}/traits'
}
],
scope_types=['system'],
),
]
def list_rules():
return rules

View File

@@ -1,54 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.api.openstack.placement.policies import base
PROVIDER_USAGES = 'placement:resource_providers:usages'
TOTAL_USAGES = 'placement:usages'
rules = [
policy.DocumentedRuleDefault(
PROVIDER_USAGES,
base.RULE_ADMIN_API,
"List resource provider usages.",
[
{
'method': 'GET',
'path': '/resource_providers/{uuid}/usages'
}
],
scope_types=['system']),
policy.DocumentedRuleDefault(
# TODO(mriedem): At some point we might set scope_types=['project']
# so that non-admin project-scoped token users can query usages for
# their project. The context.can() target will need to change as well
# in the actual policy enforcement check in the handler code.
TOTAL_USAGES,
base.RULE_ADMIN_API,
"List total resource usages for a given project.",
[
{
'method': 'GET',
'path': '/usages'
}
],
scope_types=['system'])
]
def list_rules():
return rules

View File

@@ -1,94 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Enforcement for placement API."""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import policies
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
_ENFORCER_PLACEMENT = None
def reset():
"""Used to reset the global _ENFORCER_PLACEMENT between test runs."""
global _ENFORCER_PLACEMENT
if _ENFORCER_PLACEMENT:
_ENFORCER_PLACEMENT.clear()
_ENFORCER_PLACEMENT = None
def init():
"""Init an Enforcer class. Sets the _ENFORCER_PLACEMENT global."""
global _ENFORCER_PLACEMENT
if not _ENFORCER_PLACEMENT:
# NOTE(mriedem): We have to explicitly pass in the
# [placement]/policy_file path because otherwise oslo_policy defaults
# to read the policy file from config option [oslo_policy]/policy_file
# which is used by nova. In other words, to have separate policy files
# for placement and nova, we have to use separate policy_file options.
_ENFORCER_PLACEMENT = policy.Enforcer(
CONF, policy_file=CONF.placement.policy_file)
_ENFORCER_PLACEMENT.register_defaults(policies.list_rules())
_ENFORCER_PLACEMENT.load_rules()
def get_enforcer():
# This method is used by oslopolicy CLI scripts in order to generate policy
# files from overrides on disk and defaults in code. We can just pass an
# empty list and let oslo do the config lifting for us.
# TODO(mriedem): Change the project kwarg value to "placement" once
# this code is extracted from nova.
cfg.CONF([], project='nova')
init()
return _ENFORCER_PLACEMENT
def authorize(context, action, target, do_raise=True):
"""Verifies that the action is valid on the target in this context.
:param context: instance of
nova.api.openstack.placement.context.RequestContext
:param action: string representing the action to be checked
this should be colon separated for clarity, i.e.
``placement:resource_providers:list``
:param target: dictionary representing the object of the action;
for object creation this should be a dictionary representing the
owner of the object e.g. ``{'project_id': context.project_id}``.
:param do_raise: if True (the default), raises PolicyNotAuthorized;
if False, returns False
:raises nova.api.openstack.placement.exception.PolicyNotAuthorized: if
verification fails and do_raise is True.
:returns: non-False value (not necessarily "True") if authorized, and the
exact value False if not authorized and do_raise is False.
"""
init()
credentials = context.to_policy_values()
try:
# NOTE(mriedem): The "action" kwarg is for the PolicyNotAuthorized exc.
return _ENFORCER_PLACEMENT.authorize(
action, target, credentials, do_raise=do_raise,
exc=exception.PolicyNotAuthorized, action=action)
except policy.PolicyNotRegistered:
with excutils.save_and_reraise_exception():
LOG.exception('Policy not registered')
except Exception:
with excutils.save_and_reraise_exception():
LOG.debug('Policy check for %(action)s failed with credentials '
'%(credentials)s',
{'action': action, 'credentials': credentials})
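# NOTE: a hedged usage sketch built from the docstring above (the action and
# target values are illustrative):
#
#   authorize(ctx, 'placement:resource_providers:list',
#             {'project_id': ctx.project_id})
#   # -> truthy when the context's roles satisfy the registered rule
#   #    (rule:admin_api for most placement APIs); otherwise raises
#   #    exception.PolicyNotAuthorized, or returns False if do_raise=False.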

View File

@@ -1,87 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple middleware for request logging."""
from oslo_log import log as logging
from nova.api.openstack.placement import microversion
LOG = logging.getLogger(__name__)
class RequestLog(object):
"""WSGI Middleware to write a simple request log to.
Borrowed from Paste Translogger
"""
format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" '
'status: %(status)s len: %(bytes)s '
'microversion: %(microversion)s')
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
LOG.debug('Starting request: %s "%s %s"',
environ['REMOTE_ADDR'], environ['REQUEST_METHOD'],
self._get_uri(environ))
# Set the accept header if it is not otherwise set or is '*/*'. This
# ensures that error responses will be in JSON.
accept = environ.get('HTTP_ACCEPT')
if not accept or accept == '*/*':
environ['HTTP_ACCEPT'] = 'application/json'
if LOG.isEnabledFor(logging.INFO):
return self._log_app(environ, start_response)
else:
return self.application(environ, start_response)
@staticmethod
def _get_uri(environ):
req_uri = (environ.get('SCRIPT_NAME', '')
+ environ.get('PATH_INFO', ''))
if environ.get('QUERY_STRING'):
req_uri += '?' + environ['QUERY_STRING']
return req_uri
def _log_app(self, environ, start_response):
req_uri = self._get_uri(environ)
def replacement_start_response(status, headers, exc_info=None):
"""We need to gaze at the content-length, if set, to
write log info.
"""
size = None
for name, value in headers:
if name.lower() == 'content-length':
size = value
self.write_log(environ, req_uri, status, size)
return start_response(status, headers, exc_info)
return self.application(environ, replacement_start_response)
def write_log(self, environ, req_uri, status, size):
"""Write the log info out in a formatted form to ``LOG.info``.
"""
if size is None:
size = '-'
log_format = {
'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'),
'REQUEST_METHOD': environ['REQUEST_METHOD'],
'REQUEST_URI': req_uri,
'status': status.split(None, 1)[0],
'bytes': size,
'microversion': environ.get(
microversion.MICROVERSION_ENVIRON, '-'),
}
LOG.info(self.format, log_format)
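# Illustrative usage only, not part of the original module: RequestLog
# wraps any WSGI application in the usual middleware fashion.
if __name__ == '__main__':
    def simple_app(environ, start_response):
        body = b'ok'
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]

    # Requests through logged_app are logged via LOG.info using the
    # format string defined on RequestLog, including the microversion.
    logged_app = RequestLog(simple_app)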

View File

@ -1,154 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import lockutils
import sqlalchemy as sa
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import exception
from nova.db.sqlalchemy import api_models as models
from nova import rc_fields as fields
_RC_TBL = models.ResourceClass.__table__
_LOCKNAME = 'rc_cache'
@db_api.placement_context_manager.reader
def _refresh_from_db(ctx, cache):
"""Grabs all custom resource classes from the DB table and populates the
supplied cache object's internal integer and string identifier dicts.
:param cache: ResourceClassCache object to refresh.
"""
with db_api.placement_context_manager.reader.connection.using(ctx) as conn:
sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at,
_RC_TBL.c.created_at])
res = conn.execute(sel).fetchall()
cache.id_cache = {r[1]: r[0] for r in res}
cache.str_cache = {r[0]: r[1] for r in res}
cache.all_cache = {r[1]: r for r in res}
class ResourceClassCache(object):
"""A cache of integer and string lookup values for resource classes."""
# List of dicts of all standard resource classes, where every list item
# has the form {'id': <ID>, 'name': <NAME>}
STANDARDS = [{'id': fields.ResourceClass.STANDARD.index(s), 'name': s,
'updated_at': None, 'created_at': None}
for s in fields.ResourceClass.STANDARD]
def __init__(self, ctx):
"""Initialize the cache of resource class identifiers.
:param ctx: `nova.context.RequestContext` from which we can grab a
`SQLAlchemy.Connection` object to use for any DB lookups.
"""
self.ctx = ctx
self.id_cache = {}
self.str_cache = {}
self.all_cache = {}
def clear(self):
with lockutils.lock(_LOCKNAME):
self.id_cache = {}
self.str_cache = {}
self.all_cache = {}
def id_from_string(self, rc_str):
"""Given a string representation of a resource class -- e.g. "DISK_GB"
or "IRON_SILVER" -- return the integer code for the resource class. For
standard resource classes, this integer code will match the list of
resource classes on the fields.ResourceClass field type. Other custom
resource classes will cause a DB lookup into the resource_classes
table, however the results of these DB lookups are cached since the
lookups are so frequent.
:param rc_str: The string representation of the resource class to look
up a numeric identifier for.
:returns: integer identifier for the resource class, or None, if no such
resource class was found in the list of standard resource
classes or the resource_classes database table.
:raises `exception.ResourceClassNotFound` if rc_str cannot be found in
either the standard classes or the DB.
"""
# First check the standard resource classes
if rc_str in fields.ResourceClass.STANDARD:
return fields.ResourceClass.STANDARD.index(rc_str)
with lockutils.lock(_LOCKNAME):
if rc_str in self.id_cache:
return self.id_cache[rc_str]
# Otherwise, check the database table
_refresh_from_db(self.ctx, self)
if rc_str in self.id_cache:
return self.id_cache[rc_str]
raise exception.ResourceClassNotFound(resource_class=rc_str)
def all_from_string(self, rc_str):
"""Given a string representation of a resource class -- e.g. "DISK_GB"
or "CUSTOM_IRON_SILVER" -- return all the resource class info.
:param rc_str: The string representation of the resource class for
which to look up a resource_class.
:returns: dict representing the resource class fields, if the
resource class was found in the list of standard
resource classes or the resource_classes database table.
:raises: `exception.ResourceClassNotFound` if rc_str cannot be found in
either the standard classes or the DB.
"""
# First check the standard resource classes
if rc_str in fields.ResourceClass.STANDARD:
return {'id': fields.ResourceClass.STANDARD.index(rc_str),
'name': rc_str,
'updated_at': None,
'created_at': None}
with lockutils.lock(_LOCKNAME):
if rc_str in self.all_cache:
return self.all_cache[rc_str]
# Otherwise, check the database table
_refresh_from_db(self.ctx, self)
if rc_str in self.all_cache:
return self.all_cache[rc_str]
raise exception.ResourceClassNotFound(resource_class=rc_str)
def string_from_id(self, rc_id):
"""The reverse of the id_from_string() method. Given a supplied numeric
identifier for a resource class, we look up the corresponding string
representation, either in the list of standard resource classes or via
a DB lookup. The results of these DB lookups are cached since the
lookups are so frequent.
:param rc_id: The numeric representation of the resource class to look
up a string identifier for.
:returns: string identifier for the resource class, or None, if no such
resource class was found in the list of standard resource
classes or the resource_classes database table.
:raises `exception.ResourceClassNotFound` if rc_id cannot be found in
either the standard classes or the DB.
"""
# First check the fields.ResourceClass.STANDARD values
try:
return fields.ResourceClass.STANDARD[rc_id]
except IndexError:
pass
with lockutils.lock(_LOCKNAME):
if rc_id in self.str_cache:
return self.str_cache[rc_id]
# Otherwise, check the database table
_refresh_from_db(self.ctx, self)
if rc_id in self.str_cache:
return self.str_cache[rc_id]
raise exception.ResourceClassNotFound(resource_class=rc_id)
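# Illustrative usage only, not part of the original module (``ctx`` is a
# placement RequestContext obtained elsewhere):
#
#   cache = ResourceClassCache(ctx)
#   vcpu_id = cache.id_from_string('VCPU')   # standard class, no DB lookup
#   cache.string_from_id(vcpu_id)            # -> 'VCPU'
#   cache.id_from_string('CUSTOM_IRON_SILVER')   # DB lookup, cached; raises
#                                                # ResourceClassNotFound if
#                                                # no such custom class exists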

View File

@ -1,518 +0,0 @@
REST API Version History
~~~~~~~~~~~~~~~~~~~~~~~~
This documents the changes made to the REST API with every microversion change.
The description for each version should be a verbose one which has enough
information to be suitable for use in user documentation.
.. _1.0 (Maximum in Newton):
1.0 Initial Version (Maximum in Newton)
---------------------------------------
.. versionadded:: Newton
This is the initial version of the placement REST API that was released in
Nova 14.0.0 (Newton). This contains the following routes:
* ``/resource_providers``
* ``/resource_providers/allocations``
* ``/resource_providers/inventories``
* ``/resource_providers/usages``
* ``/allocations``
1.1 Resource provider aggregates
--------------------------------
.. versionadded:: Ocata
The 1.1 version adds support for associating aggregates with resource
providers.
The following new operations are added:
``GET /resource_providers/{uuid}/aggregates``
Return all aggregates associated with a resource provider
``PUT /resource_providers/{uuid}/aggregates``
Update the aggregates associated with a resource provider
1.2 Add custom resource classes
-------------------------------
.. versionadded:: Ocata
Placement API version 1.2 adds basic operations allowing an admin to create,
list and delete custom resource classes.
The following new routes are added:
``GET /resource_classes``
Return all resource classes
``POST /resource_classes``
Create a new custom resource class
``PUT /resource_classes/{name}``
Update the name of a custom resource class
``DELETE /resource_classes/{name}``
Delete a custom resource class
``GET /resource_classes/{name}``
Get a single resource class
Custom resource classes must begin with the prefix ``CUSTOM_`` and contain only
the letters A through Z, the numbers 0 through 9 and the underscore ``_``
character.
1.3 member_of query parameter
-----------------------------
.. versionadded:: Ocata
Version 1.3 adds support for listing resource providers that are members of any
of the list of aggregates provided using a ``member_of`` query parameter::
?member_of=in:{agg1_uuid},{agg2_uuid},{agg3_uuid}
1.4 Filter resource providers by requested resource capacity (Maximum in Ocata)
-------------------------------------------------------------------------------
.. versionadded:: Ocata
The 1.4 version adds support for querying resource providers that have the
ability to serve a requested set of resources. A new "resources" query string
parameter is now accepted to the ``GET /resource_providers`` API call. This
parameter indicates the requested amounts of various resources that a provider
must have the capacity to serve. The "resources" query string parameter takes
the form::
?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
For instance, if the user wishes to see resource providers that can service a
request for 2 vCPUs, 1024 MB of RAM and 50 GB of disk space, the user can issue
a request to::
GET /resource_providers?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50
If the resource class does not exist, then the call will return an HTTP 400.
.. note:: The resources filtering is also based on the `min_unit`, `max_unit`
and `step_size` of the inventory record. For example, if the `max_unit` is
512 for the DISK_GB inventory for a particular resource provider and a
GET request is made for `DISK_GB:1024`, that resource provider will not be
returned. The `min_unit` is the minimum amount of resource that can be
requested for a given inventory and resource provider. The `step_size` is
the increment of resource that can be requested for a given resource on a
given provider.
1.5 DELETE all inventory for a resource provider
------------------------------------------------
.. versionadded:: Pike
Placement API version 1.5 adds DELETE method for deleting all inventory for a
resource provider. The following new method is supported:
``DELETE /resource_providers/{uuid}/inventories``
Delete all inventories for a given resource provider
1.6 Traits API
--------------
.. versionadded:: Pike
The 1.6 version adds basic operations allowing an admin to create, list, and
delete custom traits, also adds basic operations allowing an admin to attach
traits to a resource provider.
The following new routes are added:
``GET /traits``
Return all traits.
``PUT /traits/{name}``
Insert a single custom trait.
``GET /traits/{name}``
Check if a trait name exists.
``DELETE /traits/{name}``
Delete the specified trait.
``GET /resource_providers/{uuid}/traits``
Return all traits associated with a specific resource provider.
``PUT /resource_providers/{uuid}/traits``
Update all traits for a specific resource provider.
``DELETE /resource_providers/{uuid}/traits``
Remove any existing trait associations for a specific resource provider
Custom traits must begin with the prefix ``CUSTOM_`` and contain only the
letters A through Z, the numbers 0 through 9 and the underscore ``_``
character.
1.7 Idempotent PUT /resource_classes/{name}
-------------------------------------------
.. versionadded:: Pike
The 1.7 version changes handling of ``PUT /resource_classes/{name}`` to be a
create or verification of the resource class with ``{name}``. If the resource
class is a custom resource class and does not already exist it will be created
and a ``201`` response code returned. If the class already exists the response
code will be ``204``. This makes it possible to check or create a resource
class in one request.
1.8 Require placement 'project_id', 'user_id' in PUT /allocations
-----------------------------------------------------------------
.. versionadded:: Pike
The 1.8 version adds ``project_id`` and ``user_id`` required request parameters
to ``PUT /allocations``.
1.9 Add GET /usages
--------------------
.. versionadded:: Pike
The 1.9 version adds usages that can be queried by a project or project/user.
The following new routes are added:
``GET /usages?project_id=<project_id>``
Return all usages for a given project.
``GET /usages?project_id=<project_id>&user_id=<user_id>``
Return all usages for a given project and user.
1.10 Allocation candidates (Maximum in Pike)
--------------------------------------------
.. versionadded:: Pike
The 1.10 version brings a new REST resource endpoint for getting a list of
allocation candidates. Allocation candidates are collections of possible
allocations against resource providers that can satisfy a particular request
for resources.
1.11 Add 'allocations' link to the ``GET /resource_providers`` response
-----------------------------------------------------------------------
.. versionadded:: Queens
The ``/resource_providers/{rp_uuid}/allocations`` endpoint has been available
since version 1.0, but was not listed in the ``links`` section of the
``GET /resource_providers`` response. The link is included as of version 1.11.
1.12 PUT dict format to /allocations/{consumer_uuid}
----------------------------------------------------
.. versionadded:: Queens
In version 1.12 the request body of a ``PUT /allocations/{consumer_uuid}``
is expected to have an ``object`` for the ``allocations`` property, not an
``array`` as with earlier microversions. This puts the request body more in
alignment with the structure of the ``GET /allocations/{consumer_uuid}``
response body. Because the ``PUT`` request requires ``user_id`` and
``project_id`` in the request body, these fields are added to the ``GET``
response. In addition, the response body for ``GET /allocation_candidates``
is updated so the allocations in the ``allocation_requests`` object work
with the new ``PUT`` format.
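For example, a ``PUT /allocations/{consumer_uuid}`` request body in the 1.12
format looks like this (UUIDs illustrative)::

    {
        "allocations": {
            "4e061c03-611e-4caa-bf26-999dcff4284e": {
                "resources": {
                    "VCPU": 2,
                    "MEMORY_MB": 1024
                }
            }
        },
        "project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784",
        "user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70"
    }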
1.13 POST multiple allocations to /allocations
----------------------------------------------
.. versionadded:: Queens
Version 1.13 gives the ability to set or clear allocations for more than
one consumer UUID with a request to ``POST /allocations``.
1.14 Add nested resource providers
----------------------------------
.. versionadded:: Queens
The 1.14 version introduces the concept of nested resource providers. The
resource provider resource now contains two new attributes:
* ``parent_provider_uuid`` indicates the provider's direct parent, or null if
there is no parent. This attribute can be set in the call to ``POST
/resource_providers`` and ``PUT /resource_providers/{uuid}`` if the attribute
has not already been set to a non-NULL value (i.e. we do not support
"reparenting" a provider)
* ``root_provider_uuid`` indicates the UUID of the root resource provider in
the provider's tree. This is a read-only attribute
A new ``in_tree=<UUID>`` parameter is now available in the ``GET
/resource-providers`` API call. Supplying a UUID value for the ``in_tree``
parameter will cause all resource providers within the "provider tree" of the
provider matching ``<UUID>`` to be returned.
1.15 Add 'last-modified' and 'cache-control' headers
----------------------------------------------------
.. versionadded:: Queens
Throughout the API, 'last-modified' headers have been added to GET responses
and those PUT and POST responses that have bodies. The value is either the
actual last modified time of the most recently modified associated database
entity or the current time if there is no direct mapping to the database. In
addition, 'cache-control: no-cache' headers are added where the 'last-modified'
header has been added to prevent inadvertent caching of resources.
1.16 Limit allocation candidates
--------------------------------
.. versionadded:: Queens
Add support for a ``limit`` query parameter when making a
``GET /allocation_candidates`` request. The parameter accepts an integer
value, ``N``, which limits the maximum number of candidates returned.
1.17 Add 'required' parameter to the allocation candidates (Maximum in Queens)
------------------------------------------------------------------------------
.. versionadded:: Queens
Add the ``required`` parameter to the ``GET /allocation_candidates`` API. It
accepts a list of traits separated by ``,``. The provider summary in the
response will include the attached traits also.
1.18 Support ?required=<traits> queryparam on GET /resource_providers
---------------------------------------------------------------------
.. versionadded:: Rocky
Add support for the ``required`` query parameter to the ``GET
/resource_providers`` API. It accepts a comma-separated list of string trait
names. When specified, the API results will be filtered to include only
resource providers marked with all the specified traits. This is in addition to
(logical AND) any filtering based on other query parameters.
Trait names which are empty, do not exist, or are otherwise invalid will result
in a 400 error.
1.19 Include generation and conflict detection in provider aggregates APIs
--------------------------------------------------------------------------
.. versionadded:: Rocky
Enhance the payloads for the ``GET /resource_providers/{uuid}/aggregates``
response and the ``PUT /resource_providers/{uuid}/aggregates`` request and
response to be identical, and to include the ``resource_provider_generation``.
As with other generation-aware APIs, if the ``resource_provider_generation``
specified in the ``PUT`` request does not match the generation known by the
server, a 409 Conflict error is returned.
1.20 Return 200 with provider payload from POST /resource_providers
-------------------------------------------------------------------
.. versionadded:: Rocky
The ``POST /resource_providers`` API, on success, returns 200 with a payload
representing the newly-created resource provider, in the same format as the
corresponding ``GET /resource_providers/{uuid}`` call. This is to allow the
caller to glean automatically-set fields, such as UUID and generation, without
a subsequent GET.
1.21 Support ?member_of=<aggregates> queryparam on GET /allocation_candidates
-----------------------------------------------------------------------------
.. versionadded:: Rocky
Add support for the ``member_of`` query parameter to the ``GET
/allocation_candidates`` API. It accepts a comma-separated list of UUIDs for
aggregates. Note that if more than one aggregate UUID is passed, the
comma-separated list must be prefixed with the "in:" operator. If this
parameter is provided, the only resource providers returned will be those in
one of the specified aggregates that meet the other parts of the request.
1.22 Support forbidden traits on resource providers and allocations candidates
------------------------------------------------------------------------------
.. versionadded:: Rocky
Add support for expressing traits which are forbidden when filtering
``GET /resource_providers`` or ``GET /allocation_candidates``. A forbidden
trait is a properly formatted trait in the existing ``required`` parameter,
prefixed by a ``!``. For example ``required=!STORAGE_DISK_SSD`` asks that the
results not include any resource providers that provide solid state disk.
1.23 Include code attribute in JSON error responses
---------------------------------------------------
.. versionadded:: Rocky
JSON formatted error responses gain a new attribute, ``code``, with a value
that identifies the type of this error. This can be used to distinguish errors
that are different but use the same HTTP status code. Any error response which
does not specifically define a code will have the code
``placement.undefined_code``.
1.24 Support multiple ?member_of queryparams
--------------------------------------------
.. versionadded:: Rocky
Add support for specifying multiple ``member_of`` query parameters to the ``GET
/resource_providers`` API. When multiple ``member_of`` query parameters are
found, they are AND'd together in the final query. For example, issuing a
request for ``GET /resource_providers?member_of=agg1&member_of=agg2`` means get
the resource providers that are associated with BOTH agg1 and agg2. Issuing a
request for ``GET /resource_providers?member_of=in:agg1,agg2&member_of=agg3``
means get the resource providers that are associated with agg3 and are also
associated with *any of* (agg1, agg2).
1.25 Granular resource requests to ``GET /allocation_candidates``
-----------------------------------------------------------------
.. versionadded:: Rocky
``GET /allocation_candidates`` is enhanced to accept numbered groupings of
resource, required/forbidden trait, and aggregate association requests. A
``resources`` query parameter key with a positive integer suffix (e.g.
``resources42``) will be logically associated with ``required`` and/or
``member_of`` query parameter keys with the same suffix (e.g. ``required42``,
``member_of42``). The resources, required/forbidden traits, and aggregate
associations in that group will be satisfied by the same resource provider in
the response. When more than one numbered grouping is supplied, the
``group_policy`` query parameter is required to indicate how the groups should
interact. With ``group_policy=none``, separate groupings - numbered or
unnumbered - may or may not be satisfied by the same provider. With
``group_policy=isolate``, numbered groups are guaranteed to be satisfied by
*different* providers - though there may still be overlap with the unnumbered
group. In all cases, each ``allocation_request`` will be satisfied by providers
in a single non-sharing provider tree and/or sharing providers associated via
aggregate with any of the providers in that tree.
The ``required`` and ``member_of`` query parameters for a given group are
optional. That is, you may specify ``resources42=XXX`` without a corresponding
``required42=YYY`` or ``member_of42=ZZZ``. However, the reverse (specifying
``required42=YYY`` or ``member_of42=ZZZ`` without ``resources42=XXX``) will
result in an error.
The semantic of the (unnumbered) ``resources``, ``required``, and ``member_of``
query parameters is unchanged: the resources, traits, and aggregate
associations specified thereby may be satisfied by any provider in the same
non-sharing tree or associated via the specified aggregate(s).
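For example, a granular request asking for two groups that must be satisfied
by different providers might look like (values illustrative)::

    GET /allocation_candidates?resources1=VCPU:2,MEMORY_MB:1024
        &required1=HW_CPU_X86_VMX
        &resources2=DISK_GB:50&member_of2=in:$AGG1_UUID,$AGG2_UUID
        &group_policy=isolate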
1.26 Allow inventories to have reserved value equal to total
------------------------------------------------------------
.. versionadded:: Rocky
Starting with this version, it is allowed to set the reserved value of the
resource provider inventory to be equal to total.
1.27 Include all resource class inventories in provider_summaries
-----------------------------------------------------------------
.. versionadded:: Rocky
Include all resource class inventories in the ``provider_summaries`` field in
response of the ``GET /allocation_candidates`` API even if the resource class
is not in the requested resources.
1.28 Consumer generation support
--------------------------------
.. versionadded:: Rocky
A new generation field has been added to the consumer concept. Consumers are
the actors that are allocated resources in the placement API. When an
allocation is created, a consumer UUID is specified. Starting with microversion
1.8, a project and user ID are also required. If using microversions prior to
1.8, these are populated from the ``incomplete_consumer_project_id`` and
``incomplete_consumer_user_id`` config options from the ``[placement]``
section.
The consumer generation facilitates safe concurrent modification of an
allocation.
A consumer generation is now returned from the following URIs:
``GET /resource_providers/{uuid}/allocations``
The response continues to be a dict with a key of ``allocations``, which itself
is a dict, keyed by consumer UUID, of allocations against the resource
provider. For each of those dicts, a ``consumer_generation`` field will now be
shown.
``GET /allocations/{consumer_uuid}``
The response continues to be a dict with a key of ``allocations``, which
itself is a dict, keyed by resource provider UUID, of allocations being
consumed by the consumer with the ``{consumer_uuid}``. The top-level dict will
also now contain a ``consumer_generation`` field.
The value of the ``consumer_generation`` field is opaque and should only be
used to send back to subsequent operations on the consumer's allocations.
The ``PUT /allocations/{consumer_uuid}`` URI has been modified to now require a
``consumer_generation`` field in the request payload. This field is required to
be ``null`` if the caller expects that there are no allocations already
existing for the consumer. Otherwise, it should contain the generation that the
caller understands the consumer to be at the time of the call.
A ``409 Conflict`` will be returned from ``PUT /allocations/{consumer_uuid}``
if there was a mismatch between the supplied generation and the consumer's
generation as known by the server. Similarly, a ``409 Conflict`` will be
returned if during the course of replacing the consumer's allocations another
process concurrently changed the consumer's allocations. This allows the caller
to react to the concurrent write by re-reading the consumer's allocations and
re-issuing the call to replace allocations as needed.
The ``PUT /allocations/{consumer_uuid}`` URI has also been modified to accept
an empty allocations object, thereby bringing it to parity with the behaviour
of ``POST /allocations``, which uses an empty allocations object to indicate
that the allocations for a particular consumer should be removed. Passing an
empty allocations object along with a ``consumer_generation`` makes ``PUT
/allocations/{consumer_uuid}`` a **safe** way to delete allocations for a
consumer. The ``DELETE /allocations/{consumer_uuid}`` URI remains unsafe to
call in deployments where multiple callers may simultaneously be attempting to
modify a consumer's allocations.
The ``POST /allocations`` URI variant has also been changed to require a
``consumer_generation`` field in the request payload **for each consumer
involved in the request**. Similar responses to ``PUT
/allocations/{consumer_uuid}`` are returned when any of the consumers
generations conflict with the server's view of those consumers or if any of the
consumers involved in the request are modified by another process.
.. warning:: In all cases, it is absolutely **NOT SAFE** to create and modify
allocations for a consumer using different microversions where one
of the microversions is prior to 1.28. The only way to safely
modify allocations for a consumer and satisfy expectations you
have regarding the prior existence (or lack of existence) of those
allocations is to always use microversion 1.28+ when calling
allocations API endpoints.
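For illustration, a ``PUT /allocations/{consumer_uuid}`` request body at this
microversion carries the generation alongside the 1.12-style fields (UUIDs
illustrative)::

    {
        "allocations": {
            "4e061c03-611e-4caa-bf26-999dcff4284e": {
                "resources": {"VCPU": 1}
            }
        },
        "consumer_generation": 1,
        "project_id": "42a32c07-3eeb-4401-9373-68a8cdca6784",
        "user_id": "66cb2f29-c86d-47c3-8af5-69ae7b778c70"
    }

Passing ``"consumer_generation": null`` asserts that no allocations currently
exist for the consumer, and an empty ``allocations`` object removes them.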
1.29 Support allocation candidates with nested resource providers
-----------------------------------------------------------------
.. versionadded:: Rocky
Add support for nested resource providers with the following two features.
1) ``GET /allocation_candidates`` is aware of nested providers. Namely, when
provider trees are present, ``allocation_requests`` in the response of
``GET /allocation_candidates`` can include allocations on combinations of
multiple resource providers in the same tree.
2) ``root_provider_uuid`` and ``parent_provider_uuid`` are added to
``provider_summaries`` in the response of ``GET /allocation_candidates``.
1.30 Provide a /reshaper resource
---------------------------------
Add support for a ``POST /reshaper`` resource that provides for atomically
migrating resource provider inventories and associated allocations when some of
the inventory moves from one resource provider to another, such as when a class
of inventory moves from a parent provider to a new child provider.
.. note:: This is a special operation that should only be used in rare cases
of resource provider topology changing when inventory is in use.
Only use this if you are really sure of what you are doing.

View File

@ -1,42 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Aggregate schemas for Placement API."""
import copy
_AGGREGATES_LIST_SCHEMA = {
"type": "array",
"items": {
"type": "string",
"format": "uuid"
},
"uniqueItems": True
}
PUT_AGGREGATES_SCHEMA_V1_1 = copy.deepcopy(_AGGREGATES_LIST_SCHEMA)
PUT_AGGREGATES_SCHEMA_V1_19 = {
"type": "object",
"properties": {
"aggregates": copy.deepcopy(_AGGREGATES_LIST_SCHEMA),
"resource_provider_generation": {
"type": "integer",
}
},
"required": [
"aggregates",
"resource_provider_generation",
],
"additionalProperties": False,
}

View File

@ -1,169 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API schemas for setting and deleting allocations."""
import copy
from nova.api.openstack.placement.schemas import common
ALLOCATION_SCHEMA = {
"type": "object",
"properties": {
"allocations": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"resource_provider": {
"type": "object",
"properties": {
"uuid": {
"type": "string",
"format": "uuid"
}
},
"additionalProperties": False,
"required": ["uuid"]
},
"resources": {
"type": "object",
"minProperties": 1,
"patternProperties": {
common.RC_PATTERN: {
"type": "integer",
"minimum": 1,
}
},
"additionalProperties": False
}
},
"required": [
"resource_provider",
"resources"
],
"additionalProperties": False
}
}
},
"required": ["allocations"],
"additionalProperties": False
}
ALLOCATION_SCHEMA_V1_8 = copy.deepcopy(ALLOCATION_SCHEMA)
ALLOCATION_SCHEMA_V1_8['properties']['project_id'] = {'type': 'string',
'minLength': 1,
'maxLength': 255}
ALLOCATION_SCHEMA_V1_8['properties']['user_id'] = {'type': 'string',
'minLength': 1,
'maxLength': 255}
ALLOCATION_SCHEMA_V1_8['required'].extend(['project_id', 'user_id'])
# Update the allocation schema to achieve symmetry with the representation
# used when GET /allocations/{consumer_uuid} is called.
# NOTE(cdent): Explicit duplication here for sake of comprehensibility.
ALLOCATION_SCHEMA_V1_12 = {
"type": "object",
"properties": {
"allocations": {
"type": "object",
"minProperties": 1,
# resource provider uuid
"patternProperties": {
common.UUID_PATTERN: {
"type": "object",
"properties": {
# generation is optional
"generation": {
"type": "integer",
},
"resources": {
"type": "object",
"minProperties": 1,
# resource class
"patternProperties": {
common.RC_PATTERN: {
"type": "integer",
"minimum": 1,
}
},
"additionalProperties": False
}
},
"required": ["resources"],
"additionalProperties": False
}
},
"additionalProperties": False
},
"project_id": {
"type": "string",
"minLength": 1,
"maxLength": 255
},
"user_id": {
"type": "string",
"minLength": 1,
"maxLength": 255
}
},
"additionalProperties": False,
"required": [
"allocations",
"project_id",
"user_id"
]
}
# POST to /allocations, added in microversion 1.13, uses the
# POST_ALLOCATIONS_V1_13 schema to allow multiple allocations
# from multiple consumers in one request. It is a dict, keyed by
# consumer uuid, using the form of PUT allocations from microversion
# 1.12. In POST the allocations can be empty, so DELETABLE_ALLOCATIONS
# modifies ALLOCATION_SCHEMA_V1_12 accordingly.
DELETABLE_ALLOCATIONS = copy.deepcopy(ALLOCATION_SCHEMA_V1_12)
DELETABLE_ALLOCATIONS['properties']['allocations']['minProperties'] = 0
POST_ALLOCATIONS_V1_13 = {
"type": "object",
"minProperties": 1,
"additionalProperties": False,
"patternProperties": {
common.UUID_PATTERN: DELETABLE_ALLOCATIONS
}
}
# A required consumer generation was added to the top-level dict in this
# version of PUT /allocations/{consumer_uuid}. In addition, the PUT
# /allocations/{consumer_uuid} now allows for empty allocations (indicating the
# allocations are being removed)
ALLOCATION_SCHEMA_V1_28 = copy.deepcopy(DELETABLE_ALLOCATIONS)
ALLOCATION_SCHEMA_V1_28['properties']['consumer_generation'] = {
"type": ["integer", "null"],
"additionalProperties": False
}
ALLOCATION_SCHEMA_V1_28['required'].append("consumer_generation")
# A required consumer generation was added to the allocations dicts in this
# version of POST /allocations
REQUIRED_GENERATION_ALLOCS_POST = copy.deepcopy(DELETABLE_ALLOCATIONS)
alloc_props = REQUIRED_GENERATION_ALLOCS_POST['properties']
alloc_props['consumer_generation'] = {
"type": ["integer", "null"],
"additionalProperties": False
}
REQUIRED_GENERATION_ALLOCS_POST['required'].append("consumer_generation")
POST_ALLOCATIONS_V1_28 = copy.deepcopy(POST_ALLOCATIONS_V1_13)
POST_ALLOCATIONS_V1_28["patternProperties"] = {
common.UUID_PATTERN: REQUIRED_GENERATION_ALLOCS_POST
}
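# Illustrative only, not part of the original module: the schemas above
# are exercised with plain ``jsonschema`` validation. The payload below
# is a made-up but well-formed 1.28-style PUT body.
if __name__ == '__main__':
    import jsonschema

    payload = {
        "allocations": {
            "4e061c03-611e-4caa-bf26-999dcff4284e": {
                "resources": {"VCPU": 1, "MEMORY_MB": 512},
            },
        },
        "consumer_generation": None,
        "project_id": "a-project",
        "user_id": "a-user",
    }
    # Raises jsonschema.ValidationError if the body does not conform.
    jsonschema.validate(payload, ALLOCATION_SCHEMA_V1_28,
                        format_checker=jsonschema.FormatChecker())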

View File

@ -1,78 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API schemas for getting allocation candidates."""
import copy
# Represents the allowed query string parameters to the GET
# /allocation_candidates API call
GET_SCHEMA_1_10 = {
"type": "object",
"properties": {
"resources": {
"type": "string"
},
},
"required": [
"resources",
],
"additionalProperties": False,
}
# Add limit query parameter.
GET_SCHEMA_1_16 = copy.deepcopy(GET_SCHEMA_1_10)
GET_SCHEMA_1_16['properties']['limit'] = {
# A query parameter is always a string in webOb, but
# we'll handle integer here as well.
"type": ["integer", "string"],
"pattern": "^[1-9][0-9]*$",
"minimum": 1,
"minLength": 1
}
# Add required parameter.
GET_SCHEMA_1_17 = copy.deepcopy(GET_SCHEMA_1_16)
GET_SCHEMA_1_17['properties']['required'] = {
"type": ["string"]
}
# Add member_of parameter.
GET_SCHEMA_1_21 = copy.deepcopy(GET_SCHEMA_1_17)
GET_SCHEMA_1_21['properties']['member_of'] = {
"type": ["string"]
}
GET_SCHEMA_1_25 = copy.deepcopy(GET_SCHEMA_1_21)
# We're going to *replace* 'resources', 'required', and 'member_of'.
del GET_SCHEMA_1_25["properties"]["resources"]
del GET_SCHEMA_1_25["required"]
del GET_SCHEMA_1_25["properties"]["required"]
del GET_SCHEMA_1_25["properties"]["member_of"]
# Pattern property key format for a numbered or un-numbered grouping
_GROUP_PAT_FMT = "^%s([1-9][0-9]*)?$"
GET_SCHEMA_1_25["patternProperties"] = {
_GROUP_PAT_FMT % "resources": {
"type": "string",
},
_GROUP_PAT_FMT % "required": {
"type": "string",
},
_GROUP_PAT_FMT % "member_of": {
"type": "string",
},
}
GET_SCHEMA_1_25["properties"]["group_policy"] = {
"type": "string",
"enum": ["none", "isolate"],
}

View File

@ -1,22 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
_UUID_CHAR = "[0-9a-fA-F-]"
# TODO(efried): Use this stricter pattern, and replace string/uuid with it:
# UUID_PATTERN = "^%s{8}-%s{4}-%s{4}-%s{4}-%s{12}$" % ((_UUID_CHAR,) * 5)
UUID_PATTERN = "^%s{36}$" % _UUID_CHAR
_RC_TRAIT_CHAR = "[A-Z0-9_]"
_RC_TRAIT_PATTERN = "^%s+$" % _RC_TRAIT_CHAR
RC_PATTERN = _RC_TRAIT_PATTERN
_CUSTOM_RC_TRAIT_PATTERN = "^CUSTOM_%s+$" % _RC_TRAIT_CHAR
CUSTOM_RC_PATTERN = _CUSTOM_RC_TRAIT_PATTERN
CUSTOM_TRAIT_PATTERN = _CUSTOM_RC_TRAIT_PATTERN
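# Illustrative only, not part of the original module: a quick check of
# the patterns above with the standard ``re`` module.
if __name__ == '__main__':
    import re

    assert re.match(UUID_PATTERN, '4e061c03-611e-4caa-bf26-999dcff4284e')
    assert re.match(RC_PATTERN, 'DISK_GB')
    assert re.match(CUSTOM_RC_PATTERN, 'CUSTOM_IRON_SILVER')
    assert not re.match(CUSTOM_TRAIT_PATTERN, 'HW_CPU_X86_VMX')  # no CUSTOM_ prefix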

View File

@ -1,93 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inventory schemas for Placement API."""
import copy
from nova.api.openstack.placement.schemas import common
from nova.db import constants as db_const
BASE_INVENTORY_SCHEMA = {
"type": "object",
"properties": {
"resource_provider_generation": {
"type": "integer"
},
"total": {
"type": "integer",
"maximum": db_const.MAX_INT,
"minimum": 1,
},
"reserved": {
"type": "integer",
"maximum": db_const.MAX_INT,
"minimum": 0,
},
"min_unit": {
"type": "integer",
"maximum": db_const.MAX_INT,
"minimum": 1
},
"max_unit": {
"type": "integer",
"maximum": db_const.MAX_INT,
"minimum": 1
},
"step_size": {
"type": "integer",
"maximum": db_const.MAX_INT,
"minimum": 1
},
"allocation_ratio": {
"type": "number",
"maximum": db_const.SQL_SP_FLOAT_MAX
},
},
"required": [
"total",
"resource_provider_generation"
],
"additionalProperties": False
}
POST_INVENTORY_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
POST_INVENTORY_SCHEMA['properties']['resource_class'] = {
"type": "string",
"pattern": common.RC_PATTERN,
}
POST_INVENTORY_SCHEMA['required'].append('resource_class')
POST_INVENTORY_SCHEMA['required'].remove('resource_provider_generation')
PUT_INVENTORY_RECORD_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
PUT_INVENTORY_RECORD_SCHEMA['required'].remove('resource_provider_generation')
PUT_INVENTORY_SCHEMA = {
"type": "object",
"properties": {
"resource_provider_generation": {
"type": "integer"
},
"inventories": {
"type": "object",
"patternProperties": {
common.RC_PATTERN: PUT_INVENTORY_RECORD_SCHEMA,
}
}
},
"required": [
"resource_provider_generation",
"inventories"
],
"additionalProperties": False
}

View File

@ -1,47 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Reshaper schema for Placement API."""
import copy
from nova.api.openstack.placement.schemas import allocation
from nova.api.openstack.placement.schemas import common
from nova.api.openstack.placement.schemas import inventory
ALLOCATIONS = copy.deepcopy(allocation.POST_ALLOCATIONS_V1_28)
# In the reshaper we need to allow allocations to be an empty dict
# because it may be the case that there simply are no allocations
# (now) for any of the inventory being moved.
ALLOCATIONS['minProperties'] = 0
POST_RESHAPER_SCHEMA = {
"type": "object",
"properties": {
"inventories": {
"type": "object",
"patternProperties": {
# resource provider uuid
common.UUID_PATTERN: inventory.PUT_INVENTORY_SCHEMA,
},
# We expect inventories for at least one resource provider, otherwise
# there is no reason to call the reshaper.
"minProperties": 1,
"additionalProperties": False,
},
"allocations": ALLOCATIONS,
},
"required": [
"inventories",
"allocations",
],
"additionalProperties": False,
}

View File

@ -1,33 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API schemas for resource classes."""
import copy
from nova.api.openstack.placement.schemas import common
POST_RC_SCHEMA_V1_2 = {
"type": "object",
"properties": {
"name": {
"type": "string",
"pattern": common.CUSTOM_RC_PATTERN,
"maxLength": 255,
},
},
"required": [
"name"
],
"additionalProperties": False,
}
PUT_RC_SCHEMA_V1_2 = copy.deepcopy(POST_RC_SCHEMA_V1_2)

View File

@ -1,106 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API schemas for resource providers."""
import copy
POST_RESOURCE_PROVIDER_SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string",
"maxLength": 200
},
"uuid": {
"type": "string",
"format": "uuid"
}
},
"required": [
"name"
],
"additionalProperties": False,
}
# Remove uuid to create the schema for PUTting a resource provider
PUT_RESOURCE_PROVIDER_SCHEMA = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA)
PUT_RESOURCE_PROVIDER_SCHEMA['properties'].pop('uuid')
# Placement API microversion 1.14 adds an optional parent_provider_uuid field
# to the POST and PUT request schemas
POST_RP_SCHEMA_V1_14 = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA)
POST_RP_SCHEMA_V1_14["properties"]["parent_provider_uuid"] = {
"anyOf": [
{
"type": "string",
"format": "uuid",
},
{
"type": "null",
}
]
}
PUT_RP_SCHEMA_V1_14 = copy.deepcopy(POST_RP_SCHEMA_V1_14)
PUT_RP_SCHEMA_V1_14['properties'].pop('uuid')
# Represents the allowed query string parameters to the GET /resource_providers
# API call
GET_RPS_SCHEMA_1_0 = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"uuid": {
"type": "string",
"format": "uuid"
}
},
"additionalProperties": False,
}
# Placement API microversion 1.3 adds support for a member_of attribute
GET_RPS_SCHEMA_1_3 = copy.deepcopy(GET_RPS_SCHEMA_1_0)
GET_RPS_SCHEMA_1_3['properties']['member_of'] = {
"type": "string"
}
# Placement API microversion 1.4 adds support for requesting resource providers
# having some set of capacity for some resources. The query string is a
# comma-delimited set of "$RESOURCE_CLASS_NAME:$AMOUNT" strings. The validation
# of the string is left up to the helper code in the
# normalize_resources_qs_param() function.
GET_RPS_SCHEMA_1_4 = copy.deepcopy(GET_RPS_SCHEMA_1_3)
GET_RPS_SCHEMA_1_4['properties']['resources'] = {
"type": "string"
}
# Placement API microversion 1.14 adds support for requesting resource
# providers within a tree of providers. The 'in_tree' query string parameter
# should be the UUID of a resource provider. The result of the GET call will
# include only those resource providers in the same "provider tree" as the
# provider with the UUID represented by 'in_tree'
GET_RPS_SCHEMA_1_14 = copy.deepcopy(GET_RPS_SCHEMA_1_4)
GET_RPS_SCHEMA_1_14['properties']['in_tree'] = {
"type": "string",
"format": "uuid",
}
# Microversion 1.18 adds support for the `required` query parameter to the
# `GET /resource_providers` API. It accepts a comma-separated list of string
# trait names. When specified, the API results will be filtered to include only
# resource providers marked with all the specified traits. This is in addition
# to (logical AND) any filtering based on other query parameters.
GET_RPS_SCHEMA_1_18 = copy.deepcopy(GET_RPS_SCHEMA_1_14)
GET_RPS_SCHEMA_1_18['properties']['required'] = {
"type": "string",
}

View File

@ -1,56 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Trait schemas for Placement API."""
import copy
from nova.api.openstack.placement.schemas import common
TRAIT = {
"type": "string",
'minLength': 1, 'maxLength': 255,
}
CUSTOM_TRAIT = copy.deepcopy(TRAIT)
CUSTOM_TRAIT.update({"pattern": common.CUSTOM_TRAIT_PATTERN})
PUT_TRAITS_SCHEMA = {
"type": "object",
"properties": {
"traits": {
"type": "array",
"items": CUSTOM_TRAIT,
}
},
'required': ['traits'],
'additionalProperties': False
}
SET_TRAITS_FOR_RP_SCHEMA = copy.deepcopy(PUT_TRAITS_SCHEMA)
SET_TRAITS_FOR_RP_SCHEMA['properties']['traits']['items'] = TRAIT
SET_TRAITS_FOR_RP_SCHEMA['properties'][
'resource_provider_generation'] = {'type': 'integer'}
SET_TRAITS_FOR_RP_SCHEMA['required'].append('resource_provider_generation')
LIST_TRAIT_SCHEMA = {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"associated": {
"type": "string",
}
},
"additionalProperties": False
}

View File

@ -1,33 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Placement API schemas for usage information."""
# Represents the allowed query string parameters to GET /usages
GET_USAGES_SCHEMA_1_9 = {
"type": "object",
"properties": {
"project_id": {
"type": "string",
"minLength": 1,
"maxLength": 255,
},
"user_id": {
"type": "string",
"minLength": 1,
"maxLength": 255,
},
},
"required": [
"project_id"
],
"additionalProperties": False,
}

View File

@ -1,697 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for placement API."""
import functools
import re
import jsonschema
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import request_id
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
from nova.api.openstack.placement import errors
from nova.api.openstack.placement import exception
from nova.api.openstack.placement import lib as placement_lib
# NOTE(cdent): avoid cyclical import conflict between util and
# microversion
import nova.api.openstack.placement.microversion
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.api.openstack.placement.objects import project as project_obj
from nova.api.openstack.placement.objects import user as user_obj
from nova.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Error code handling constants
ENV_ERROR_CODE = 'placement.error_code'
ERROR_CODE_MICROVERSION = (1, 23)
# Querystring-related constants
_QS_RESOURCES = 'resources'
_QS_REQUIRED = 'required'
_QS_MEMBER_OF = 'member_of'
_QS_KEY_PATTERN = re.compile(
r"^(%s)([1-9][0-9]*)?$" % '|'.join(
(_QS_RESOURCES, _QS_REQUIRED, _QS_MEMBER_OF)))
# NOTE(cdent): This registers a FormatChecker on the jsonschema
# module. Do not delete this code! Although it appears that nothing
# is using the decorated method it is being used in JSON schema
# validations to check uuid formatted strings.
@jsonschema.FormatChecker.cls_checks('uuid')
def _validate_uuid_format(instance):
return uuidutils.is_uuid_like(instance)
def check_accept(*types):
"""If accept is set explicitly, try to follow it.
If there is no match for the incoming accept header
send a 406 response code.
If accept is not set send our usual content-type in
response.
"""
def decorator(f):
@functools.wraps(f)
def decorated_function(req):
if req.accept:
best_matches = req.accept.acceptable_offers(types)
if not best_matches:
type_string = ', '.join(types)
raise webob.exc.HTTPNotAcceptable(
_('Only %(type)s is provided') % {'type': type_string},
json_formatter=json_error_formatter)
return f(req)
return decorated_function
return decorator
def extract_json(body, schema):
"""Extract JSON from a body and validate with the provided schema."""
try:
data = jsonutils.loads(body)
except ValueError as exc:
raise webob.exc.HTTPBadRequest(
_('Malformed JSON: %(error)s') % {'error': exc},
json_formatter=json_error_formatter)
try:
jsonschema.validate(data, schema,
format_checker=jsonschema.FormatChecker())
except jsonschema.ValidationError as exc:
raise webob.exc.HTTPBadRequest(
_('JSON does not validate: %(error)s') % {'error': exc},
json_formatter=json_error_formatter)
return data
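# Illustrative only, not part of the original module: given a schema
# such as those in nova.api.openstack.placement.schemas,
#   extract_json('{"name": "fred"}', SOME_SCHEMA)
# returns the parsed dict, while malformed JSON or a schema violation is
# surfaced to the client as webob.exc.HTTPBadRequest with a JSON error
# body (SOME_SCHEMA is a stand-in name here).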
def inventory_url(environ, resource_provider, resource_class=None):
url = '%s/inventories' % resource_provider_url(environ, resource_provider)
if resource_class:
url = '%s/%s' % (url, resource_class)
return url
def json_error_formatter(body, status, title, environ):
"""A json_formatter for webob exceptions.
Follows API-WG guidelines at
http://specs.openstack.org/openstack/api-wg/guidelines/errors.html
"""
# Shortcut to microversion module, to avoid wraps below.
microversion = nova.api.openstack.placement.microversion
# Clear out the html that webob sneaks in.
body = webob.exc.strip_tags(body)
# Get status code out of status message. webob's error formatter
# only passes entire status string.
status_code = int(status.split(None, 1)[0])
error_dict = {
'status': status_code,
'title': title,
'detail': body
}
# Version may not be set if we have experienced an error before it
# is set.
want_version = environ.get(microversion.MICROVERSION_ENVIRON)
if want_version and want_version.matches(ERROR_CODE_MICROVERSION):
error_dict['code'] = environ.get(ENV_ERROR_CODE, errors.DEFAULT)
# If the request id middleware has had a chance to add an id,
# put it in the error response.
if request_id.ENV_REQUEST_ID in environ:
error_dict['request_id'] = environ[request_id.ENV_REQUEST_ID]
# When there is no microversion in the environment and a 406,
# microversion parsing failed so we need to include microversion
# min and max information in the error response.
if status_code == 406 and microversion.MICROVERSION_ENVIRON not in environ:
error_dict['max_version'] = microversion.max_version_string()
error_dict['min_version'] = microversion.min_version_string()
return {'errors': [error_dict]}
def pick_last_modified(last_modified, obj):
"""Choose max of last_modified and obj.updated_at or obj.created_at.
If updated_at is not implemented in `obj` use the current time in UTC.
"""
try:
current_modified = (obj.updated_at or obj.created_at)
except NotImplementedError:
# If updated_at is not implemented, we are looking at objects that
# have not come from the database, so "now" is the right modified
# time.
current_modified = timeutils.utcnow(with_timezone=True)
if last_modified:
last_modified = max(last_modified, current_modified)
else:
last_modified = current_modified
return last_modified
def require_content(content_type):
"""Decorator to require a content type in a handler."""
def decorator(f):
@functools.wraps(f)
def decorated_function(req):
if req.content_type != content_type:
# webob's unset content_type is the empty string so
# set the error message content to 'None' to make
# a useful message in that case. This also avoids a
# KeyError raised when webob.exc eagerly fills in a
# Template for output we will never use.
if not req.content_type:
req.content_type = 'None'
raise webob.exc.HTTPUnsupportedMediaType(
_('The media type %(bad_type)s is not supported, '
'use %(good_type)s') %
{'bad_type': req.content_type,
'good_type': content_type},
json_formatter=json_error_formatter)
else:
return f(req)
return decorated_function
return decorator
def resource_class_url(environ, resource_class):
"""Produce the URL for a resource class.
If SCRIPT_NAME is present, it is the mount point of the placement
WSGI app.
"""
prefix = environ.get('SCRIPT_NAME', '')
return '%s/resource_classes/%s' % (prefix, resource_class.name)
def resource_provider_url(environ, resource_provider):
"""Produce the URL for a resource provider.
If SCRIPT_NAME is present, it is the mount point of the placement
WSGI app.
"""
prefix = environ.get('SCRIPT_NAME', '')
return '%s/resource_providers/%s' % (prefix, resource_provider.uuid)
def trait_url(environ, trait):
"""Produce the URL for a trait.
If SCRIPT_NAME is present, it is the mount point of the placement
WSGI app.
"""
prefix = environ.get('SCRIPT_NAME', '')
return '%s/traits/%s' % (prefix, trait.name)
def validate_query_params(req, schema):
try:
# NOTE(Kevin_Zheng): The webob package throws UnicodeError when
# param cannot be decoded. Catch this and raise HTTP 400.
jsonschema.validate(dict(req.GET), schema,
format_checker=jsonschema.FormatChecker())
except (jsonschema.ValidationError, UnicodeDecodeError) as exc:
raise webob.exc.HTTPBadRequest(
_('Invalid query string parameters: %(exc)s') %
{'exc': exc})
def wsgi_path_item(environ, name):
"""Extract the value of a named field in a URL.
Return None if the name is not present or there are no path items.
"""
# NOTE(cdent): For the time being we don't need to urldecode
# the value as the entire placement API has paths that accept no
# encoded values.
try:
return environ['wsgiorg.routing_args'][1][name]
except (KeyError, IndexError):
return None
def normalize_resources_qs_param(qs):
"""Given a query string parameter for resources, validate it meets the
expected format and return a dict of amounts, keyed by resource class name.
The expected format of the resources parameter looks like so:
$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
So, if the user was looking for resource providers that had room for an
instance that will consume 2 vCPUs, 1024 MB of RAM and 50GB of disk space,
they would use the following query string:
?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50
The returned value would be:
{
"VCPU": 2,
"MEMORY_MB": 1024,
"DISK_GB": 50,
}
:param qs: The value of the 'resources' query string parameter
:raises `webob.exc.HTTPBadRequest` if the parameter's value isn't in the
expected format.
"""
if qs.strip() == "":
msg = _('Badly formed resources parameter. Expected resources '
'query string parameter in form: '
'?resources=VCPU:2,MEMORY_MB:1024. Got: empty string.')
raise webob.exc.HTTPBadRequest(msg)
result = {}
resource_tuples = qs.split(',')
for rt in resource_tuples:
try:
rc_name, amount = rt.split(':')
except ValueError:
msg = _('Badly formed resources parameter. Expected resources '
'query string parameter in form: '
'?resources=VCPU:2,MEMORY_MB:1024. Got: %s.')
msg = msg % rt
raise webob.exc.HTTPBadRequest(msg)
try:
amount = int(amount)
except ValueError:
msg = _('Requested resource %(resource_name)s expected positive '
'integer amount. Got: %(amount)s.')
msg = msg % {
'resource_name': rc_name,
'amount': amount,
}
raise webob.exc.HTTPBadRequest(msg)
if amount < 1:
msg = _('Requested resource %(resource_name)s requires '
'amount >= 1. Got: %(amount)d.')
msg = msg % {
'resource_name': rc_name,
'amount': amount,
}
raise webob.exc.HTTPBadRequest(msg)
result[rc_name] = amount
return result
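# Illustrative only, not part of the original module:
#   normalize_resources_qs_param('VCPU:2,MEMORY_MB:1024,DISK_GB:50')
# returns {'VCPU': 2, 'MEMORY_MB': 1024, 'DISK_GB': 50}, while an empty
# string, a missing ':', or a non-positive amount raises
# webob.exc.HTTPBadRequest.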
def valid_trait(trait, allow_forbidden):
"""Return True if the provided trait is the expected form.
When allow_forbidden is True, then a leading '!' is acceptable.
"""
if trait.startswith('!') and not allow_forbidden:
return False
return True
def normalize_traits_qs_param(val, allow_forbidden=False):
"""Parse a traits query string parameter value.
Note that this method doesn't know or care about the query parameter key,
which may currently be of the form `required`, `required123`, etc., but
which may someday also include `preferred`, etc.
This method currently does no format validation of trait strings, other
than to ensure they're not zero-length.
:param val: A traits query parameter value: a comma-separated string of
trait names.
:param allow_forbidden: If True, traits prefixed with '!' (forbidden
traits) are accepted as valid; the expected-form hint in any error
raised for a malformed value reflects this as well.
:return: A set of trait names.
:raises `webob.exc.HTTPBadRequest` if the val parameter is not in the
expected format.
"""
ret = set(substr.strip() for substr in val.split(','))
expected_form = 'HW_CPU_X86_VMX,CUSTOM_MAGIC'
if allow_forbidden:
expected_form = 'HW_CPU_X86_VMX,!CUSTOM_MAGIC'
if not all(trait and valid_trait(trait, allow_forbidden) for trait in ret):
msg = _("Invalid query string parameters: Expected 'required' "
"parameter value of the form: %(form)s. "
"Got: %(val)s") % {'form': expected_form, 'val': val}
raise webob.exc.HTTPBadRequest(msg)
return ret
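# Illustrative only (editor's sketch):
#
#   normalize_traits_qs_param('HW_CPU_X86_VMX,CUSTOM_MAGIC')
#   # -> {'HW_CPU_X86_VMX', 'CUSTOM_MAGIC'}
#   normalize_traits_qs_param('HW_CPU_X86_VMX,!CUSTOM_MAGIC',
#                             allow_forbidden=True)
#   # -> {'HW_CPU_X86_VMX', '!CUSTOM_MAGIC'}  (the '!' prefix is stripped
#   #    later, in parse_qs_request_groups)
#
# A zero-length trait (e.g. from a trailing comma) or a '!' prefix without
# allow_forbidden raises webob.exc.HTTPBadRequest.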
def normalize_member_of_qs_params(req, suffix=''):
"""Given a webob.Request object, validate that the member_of querystring
parameters are correct. We begin supporting multiple member_of params in
microversion 1.24.
:param req: webob.Request object
:return: A list containing sets of UUIDs of aggregates to filter on
:raises `webob.exc.HTTPBadRequest` if the microversion requested is <1.24
and the request contains multiple member_of querystring params
:raises `webob.exc.HTTPBadRequest` if the val parameter is not in the
expected format.
"""
microversion = nova.api.openstack.placement.microversion
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
multi_member_of = want_version.matches((1, 24))
if not multi_member_of and len(req.GET.getall('member_of' + suffix)) > 1:
raise webob.exc.HTTPBadRequest(
_('Multiple member_of%s parameters are not supported') % suffix)
values = []
for value in req.GET.getall('member_of' + suffix):
values.append(normalize_member_of_qs_param(value))
return values
def normalize_member_of_qs_param(value):
"""Parse a member_of query string parameter value.
Valid values are either a single UUID, or the prefix 'in:' followed by two
or more comma-separated UUIDs.
:param value: A member_of query parameter of either a single UUID, or a
comma-separated string of two or more UUIDs, prefixed with
the "in:" operator
:return: A set of UUIDs
:raises `webob.exc.HTTPBadRequest` if the value parameter is not in the
expected format.
"""
if "," in value and not value.startswith("in:"):
msg = _("Multiple values for 'member_of' must be prefixed with the "
"'in:' keyword. Got: %s") % value
raise webob.exc.HTTPBadRequest(msg)
if value.startswith('in:'):
value = set(value[3:].split(','))
else:
value = set([value])
# Make sure the values are actually UUIDs.
for aggr_uuid in value:
if not uuidutils.is_uuid_like(aggr_uuid):
msg = _("Invalid query string parameters: Expected 'member_of' "
"parameter to contain valid UUID(s). Got: %s") % aggr_uuid
raise webob.exc.HTTPBadRequest(msg)
return value
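# Illustrative only (editor's sketch, reusing the aggregate UUIDs from the
# docstrings above):
#
#   normalize_member_of_qs_param('9323b2b1-82c9-4e91-bdff-e95e808ef954')
#   # -> {'9323b2b1-82c9-4e91-bdff-e95e808ef954'}
#   normalize_member_of_qs_param(
#       'in:9323b2b1-82c9-4e91-bdff-e95e808ef954,'
#       '8592a199-7d73-4465-8df6-ab00a6243c82')
#   # -> a set containing both UUIDs
#
# Comma-separated values without the 'in:' prefix, or anything that is not
# UUID-like, raises webob.exc.HTTPBadRequest.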
def parse_qs_request_groups(req):
"""Parse numbered resources, traits, and member_of groupings out of a
querystring dict.
The request's query string is expected to be of the form:
?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
&required=$TRAIT_NAME,$TRAIT_NAME&member_of=in:$AGG1_UUID,$AGG2_UUID
&resources1=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
&required1=$TRAIT_NAME,$TRAIT_NAME&member_of1=$AGG_UUID
&resources2=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
&required2=$TRAIT_NAME,$TRAIT_NAME&member_of2=$AGG_UUID
These are parsed in groups according to the numeric suffix of the key.
For each group, a RequestGroup instance is created containing that group's
resources, required traits, and member_of. For the (single) group with no
suffix, the RequestGroup.use_same_provider attribute is False; for the
numbered groups it is True.
If a trait in the required parameter is prefixed with ``!`` this
indicates that that trait must not be present on the resource
providers in the group. That is, the trait is forbidden. Forbidden traits
are only processed when the requested microversion supports them (1.22 or
later).
The return is a dict, keyed by the numeric suffix of these RequestGroup
instances (or the empty string for the unnumbered group).
As an example, if the request's query string is:
?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50
&required=HW_CPU_X86_VMX,CUSTOM_STORAGE_RAID
&member_of=in:9323b2b1-82c9-4e91-bdff-e95e808ef954,8592a199-7d73-4465-8df6-ab00a6243c82 # noqa
&resources1=SRIOV_NET_VF:2
&required1=CUSTOM_PHYSNET_PUBLIC,CUSTOM_SWITCH_A
&resources2=SRIOV_NET_VF:1
&required2=!CUSTOM_PHYSNET_PUBLIC
...the return value will be:
{ '': RequestGroup(
use_same_provider=False,
resources={
"VCPU": 2,
"MEMORY_MB": 1024,
"DISK_GB" 50,
},
required_traits=[
"HW_CPU_X86_VMX",
"CUSTOM_STORAGE_RAID",
],
member_of=[
[9323b2b1-82c9-4e91-bdff-e95e808ef954,
8592a199-7d73-4465-8df6-ab00a6243c82],
],
),
'1': RequestGroup(
use_same_provider=True,
resources={
"SRIOV_NET_VF": 2,
},
required_traits=[
"CUSTOM_PHYSNET_PUBLIC",
"CUSTOM_SWITCH_A",
],
),
'2': RequestGroup(
use_same_provider=True,
resources={
"SRIOV_NET_VF": 1,
},
forbidden_traits=[
"CUSTOM_PHYSNET_PUBLIC",
],
),
}
:param req: webob.Request object
:return: A dict of RequestGroup instances, keyed by the numeric suffix of
each group (or the empty string for the unnumbered group).
:raises `webob.exc.HTTPBadRequest` if any value is malformed, or if a
trait list is given without corresponding resources.
"""
microversion = nova.api.openstack.placement.microversion
want_version = req.environ[microversion.MICROVERSION_ENVIRON]
# Control whether we handle forbidden traits.
allow_forbidden = want_version.matches((1, 22))
# Temporary dict of the form: { suffix: RequestGroup }
by_suffix = {}
def get_request_group(suffix):
if suffix not in by_suffix:
rq_grp = placement_lib.RequestGroup(use_same_provider=bool(suffix))
by_suffix[suffix] = rq_grp
return by_suffix[suffix]
for key, val in req.GET.items():
match = _QS_KEY_PATTERN.match(key)
if not match:
continue
# `prefix` is 'resources', 'required', or 'member_of'
# `suffix` is an integer string, or None
prefix, suffix = match.groups()
suffix = suffix or ''
request_group = get_request_group(suffix)
if prefix == _QS_RESOURCES:
request_group.resources = normalize_resources_qs_param(val)
elif prefix == _QS_REQUIRED:
request_group.required_traits = normalize_traits_qs_param(
val, allow_forbidden=allow_forbidden)
elif prefix == _QS_MEMBER_OF:
# special handling of member_of qparam since we allow multiple
# member_of params at microversion 1.24.
# NOTE(jaypipes): Yes, this is inefficient to do this when there
# are multiple member_of query parameters, but we do this so we can
# error out if someone passes an "orphaned" member_of request
# group.
# TODO(jaypipes): Do validation of query parameters using
# JSONSchema
request_group.member_of = normalize_member_of_qs_params(
req, suffix)
# Ensure any group with 'required' or 'member_of' also has 'resources'.
orphans = [('required%s' % suff) for suff, group in by_suffix.items()
if group.required_traits and not group.resources]
if orphans:
msg = _('All traits parameters must be associated with resources. '
'Found the following orphaned traits keys: %s')
raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans))
orphans = [('member_of%s' % suff) for suff, group in by_suffix.items()
if group.member_of and not group.resources]
if orphans:
msg = _('All member_of parameters must be associated with '
'resources. Found the following orphaned member_of '
'keys: %s')
raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans))
# All request groups must have resources (which is almost, but not quite,
# verified by the orphan checks above).
if not all(grp.resources for grp in by_suffix.values()):
msg = _("All request groups must specify resources.")
raise webob.exc.HTTPBadRequest(msg)
# The above would still pass if there were no request groups
if not by_suffix:
msg = _("At least one request group (`resources` or `resources{N}`) "
"is required.")
raise webob.exc.HTTPBadRequest(msg)
# Make adjustments for forbidden traits by stripping forbidden out
# of required.
if allow_forbidden:
conflicting_traits = []
for suff, group in by_suffix.items():
forbidden = [trait for trait in group.required_traits
if trait.startswith('!')]
group.required_traits = (group.required_traits - set(forbidden))
group.forbidden_traits = set([trait.lstrip('!') for trait in
forbidden])
conflicts = group.forbidden_traits & group.required_traits
if conflicts:
conflicting_traits.append('required%s: (%s)'
% (suff, ', '.join(conflicts)))
if conflicting_traits:
msg = _('Conflicting required and forbidden traits found in the '
'following traits keys: %s')
raise webob.exc.HTTPBadRequest(msg % ', '.join(conflicting_traits))
return by_suffix
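# A minimal usage sketch (editor's illustration; assumes `req` is a
# webob.Request that has already passed through the microversion middleware
# so that MICROVERSION_ENVIRON is set in req.environ):
#
#   groups = parse_qs_request_groups(req)
#   unnumbered = groups['']   # the single use_same_provider=False group
#   for suffix, group in groups.items():
#       print(suffix, group.resources, group.required_traits)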
def ensure_consumer(ctx, consumer_uuid, project_id, user_id,
consumer_generation, want_version):
"""Ensures there are records in the consumers, projects and users table for
the supplied external identifiers.
Returns a tuple containing the populated Consumer object containing Project
and User sub-objects and a boolean indicating whether a new Consumer object
was created (as opposed to an existing consumer record retrieved)
:note: If the supplied project or user external identifiers do not match an
existing consumer's project and user identifiers, the existing
consumer's project and user IDs are updated to reflect the supplied
ones.
:param ctx: The request context.
:param consumer_uuid: The uuid of the consumer of the resources.
:param project_id: The external ID of the project consuming the resources.
:param user_id: The external ID of the user consuming the resources.
:param consumer_generation: The generation provided by the user for this
consumer.
:param want_version: the microversion matcher.
:raises webob.exc.HTTPConflict if consumer generation is required and there
was a mismatch
"""
created_new_consumer = False
requires_consumer_generation = want_version.matches((1, 28))
if project_id is None:
project_id = CONF.placement.incomplete_consumer_project_id
user_id = CONF.placement.incomplete_consumer_user_id
try:
proj = project_obj.Project.get_by_external_id(ctx, project_id)
except exception.NotFound:
# Auto-create the project if we found no record of it...
try:
proj = project_obj.Project(ctx, external_id=project_id)
proj.create()
except exception.ProjectExists:
# No worries, another thread created this project already
proj = project_obj.Project.get_by_external_id(ctx, project_id)
try:
user = user_obj.User.get_by_external_id(ctx, user_id)
except exception.NotFound:
# Auto-create the user if we found no record of it...
try:
user = user_obj.User(ctx, external_id=user_id)
user.create()
except exception.UserExists:
# No worries, another thread created this user already
user = user_obj.User.get_by_external_id(ctx, user_id)
try:
consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
if requires_consumer_generation:
if consumer.generation != consumer_generation:
raise webob.exc.HTTPConflict(
_('consumer generation conflict - '
'expected %(expected_gen)s but got %(got_gen)s') %
{
'expected_gen': consumer.generation,
'got_gen': consumer_generation,
},
comment=errors.CONCURRENT_UPDATE)
# NOTE(jaypipes): The user may have specified a different project and
# user external ID than the one that we had for the consumer. If this
# is the case, go ahead and modify the consumer record with the
# newly-supplied project/user information, but do not bump the consumer
# generation (since it will be bumped in the
# AllocationList.replace_all() method).
#
# TODO(jaypipes): This means that there may be a partial update.
# Imagine a scenario where a user calls POST /allocations, and the
# payload references two consumers. The first consumer is a new
# consumer and is auto-created. The second consumer is an existing
# consumer, but contains a different project or user ID than the
# existing consumer's record. If the eventual call to
# AllocationList.replace_all() fails for whatever reason (say, a
# resource provider generation conflict or out of resources failure),
# we will end up deleting the auto-created consumer but we MAY not undo
# the changes to the second consumer's project and user ID. I say MAY
# and not WILL NOT because I'm not sure that the exception that gets
# raised from AllocationList.replace_all() will cause the context
# manager's transaction to rollback automatically. I believe that the
# same transaction context is used for both util.ensure_consumer() and
# AllocationList.replace_all() within the same HTTP request, but need
# to test this to be 100% certain...
if (project_id != consumer.project.external_id or
user_id != consumer.user.external_id):
LOG.debug("Supplied project or user ID for consumer %s was "
"different than existing record. Updating consumer "
"record.", consumer_uuid)
consumer.project = proj
consumer.user = user
consumer.update()
except exception.NotFound:
# If we are attempting to modify or create allocations at microversion 1.28 or later, we
# need a consumer generation specified. The user must have specified
# None for the consumer generation if we get here, since there was no
# existing consumer with this UUID and therefore the user should be
# indicating that they expect the consumer did not exist.
if requires_consumer_generation:
if consumer_generation is not None:
raise webob.exc.HTTPConflict(
_('consumer generation conflict - '
'expected null but got %s') % consumer_generation,
comment=errors.CONCURRENT_UPDATE)
# No such consumer. This is common for new allocations. Create the
# consumer record
try:
consumer = consumer_obj.Consumer(
ctx, uuid=consumer_uuid, project=proj, user=user)
consumer.create()
created_new_consumer = True
except exception.ConsumerExists:
# No worries, another thread created this consumer already
consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
return consumer, created_new_consumer
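# A hedged usage sketch (editor's illustration; `ctx`, `want_version` and the
# identifiers would normally come from the request context, the microversion
# middleware and the request body respectively):
#
#   consumer, created = ensure_consumer(
#       ctx, consumer_uuid, project_id, user_id,
#       consumer_generation=None, want_version=want_version)
#   # `created` is True only when no consumer row existed for consumer_uuid.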

View File

@ -1,120 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI script for Placement API
WSGI handler for running Placement API under Apache2, nginx, gunicorn etc.
"""
import logging as py_logging
import os
import os.path
from oslo_log import log as logging
from oslo_middleware import cors
from oslo_policy import opts as policy_opts
from oslo_utils import importutils
import pbr.version
from nova.api.openstack.placement import db_api
from nova.api.openstack.placement import deploy
from nova import conf
profiler = importutils.try_import('osprofiler.opts')
CONFIG_FILE = 'nova.conf'
version_info = pbr.version.VersionInfo('nova')
def setup_logging(config):
# Any dependent libraries that have unhelpful debug levels should be
# pinned to a higher default.
extra_log_level_defaults = [
'routes=INFO',
]
logging.set_defaults(default_log_levels=logging.get_default_log_levels() +
extra_log_level_defaults)
logging.setup(config, 'nova')
py_logging.captureWarnings(True)
def _get_config_file(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_PLACEMENT_CONFIG_DIR', '/etc/nova').strip()
return os.path.join(dirname, CONFIG_FILE)
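# Illustrative only (editor's sketch): with the default environment this
# resolves to /etc/nova/nova.conf, and the directory can be overridden, e.g.
#
#   _get_config_file({'OS_PLACEMENT_CONFIG_DIR': '/etc/placement'})
#   # -> '/etc/placement/nova.conf'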
def _parse_args(argv, default_config_files):
logging.register_options(conf.CONF)
if profiler:
profiler.set_defaults(conf.CONF)
_set_middleware_defaults()
# This is needed so we can check [oslo_policy]/enforce_scope in the
# deploy module.
policy_opts.set_defaults(conf.CONF)
conf.CONF(argv[1:], project='nova', version=version_info.version_string(),
default_config_files=default_config_files)
def _set_middleware_defaults():
"""Update default configuration options for oslo.middleware."""
cors.set_defaults(
allow_headers=['X-Auth-Token',
'X-Openstack-Request-Id',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id'],
expose_headers=['X-Auth-Token',
'X-Openstack-Request-Id',
'X-Subject-Token',
'X-Service-Token'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH']
)
def init_application():
# initialize the config system
conffile = _get_config_file()
# NOTE(lyarwood): Call reset to ensure the ConfigOpts object doesn't
# already contain registered options if the app is reloaded.
conf.CONF.reset()
_parse_args([], default_config_files=[conffile])
db_api.configure(conf.CONF)
# initialize the logging system
setup_logging(conf.CONF)
# dump conf at debug if log_options
if conf.CONF.log_options:
conf.CONF.log_opt_values(
logging.getLogger(__name__),
logging.DEBUG)
# build and return our WSGI app
return deploy.loadapp(conf.CONF)
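# A deployment sketch (editor's illustration): the nova-placement-api entry
# under wsgi_scripts in setup.cfg (removed later in this change) generated a
# script for WSGI servers such as Apache mod_wsgi or uwsgi; that generated
# script is roughly equivalent to:
#
#   from nova.api.openstack.placement.wsgi import init_application
#   application = init_application()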

View File

@ -1,38 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Extend functionality from webob.dec.wsgify for Placement API."""
import webob
from oslo_log import log as logging
from webob.dec import wsgify
from nova.api.openstack.placement import util
LOG = logging.getLogger(__name__)
class PlacementWsgify(wsgify):
def call_func(self, req, *args, **kwargs):
"""Add json_error_formatter to any webob HTTPExceptions."""
try:
super(PlacementWsgify, self).call_func(req, *args, **kwargs)
except webob.exc.HTTPException as exc:
LOG.debug("Placement API returning an error response: %s", exc)
exc.json_formatter = util.json_error_formatter
# The exception itself is not passed to json_error_formatter
# but environ is, so set the environ.
if exc.comment:
req.environ[util.ENV_ERROR_CODE] = exc.comment
exc.comment = None
raise
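# Illustrative only (editor's sketch; the handler name and error code string
# are made up): placement handlers used this in place of webob's own wsgify
# decorator, roughly like
#
#   @PlacementWsgify
#   def list_traits(req):
#       raise webob.exc.HTTPBadRequest(
#           'bad request', comment='placement.some_error_code')
#
# The wrapper attaches util.json_error_formatter to the raised exception and
# moves the `comment` error code into the WSGI environ before re-raising.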

View File

@ -45,8 +45,6 @@ import six
import six.moves.urllib.parse as urlparse
from sqlalchemy.engine import url as sqla_url
# FIXME(cdent): This is a speedbump in the extraction process
from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.cmd import common as cmd_common
from nova.compute import api as compute_api
import nova.conf
@ -400,9 +398,6 @@ class DbCommands(object):
# need to be populated if it was not specified during boot time.
instance_obj.populate_missing_availability_zones,
# Added in Rocky
# FIXME(cdent): This is a factor that needs to be addressed somehow
consumer_obj.create_incomplete_consumers,
# Added in Rocky
instance_mapping_obj.populate_queued_for_delete,
# Added in Stein
compute_node_obj.migrate_empty_ratio,
@ -864,11 +859,7 @@ class ApiDbCommands(object):
@args('--version', metavar='<version>', help=argparse.SUPPRESS)
@args('version2', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None, version2=None):
"""Sync the database up to the most recent version.
If placement_database.connection is not None, sync that
database using the API database migrations.
"""
"""Sync the database up to the most recent version."""
if version and not version2:
print(_("DEPRECATED: The '--version' parameter was deprecated in "
"the Pike cycle and will not be supported in future "
@ -876,15 +867,7 @@ class ApiDbCommands(object):
"instead"))
version2 = version
# NOTE(cdent): At the moment, the migration code deep in the belly
# of the migration package doesn't actually return anything, so
# returning the result of db_sync is not particularly meaningful
# here. But, in case that changes, we store the result from the
# the placement sync to and with the api sync.
result = True
if CONF.placement_database.connection is not None:
result = migration.db_sync(version2, database='placement')
return migration.db_sync(version2, database='api') and result
return migration.db_sync(version2, database='api')
def version(self):
"""Print the current database version."""
@ -1844,7 +1827,6 @@ class PlacementCommands(object):
return num_processed
# FIXME(cdent): This needs to be addressed as part of extraction.
@action_description(
_("Iterates over non-cell0 cells looking for instances which do "
"not have allocations in the Placement service, or have incomplete "

View File

@ -106,61 +106,9 @@ def enrich_help_text(alt_db_opts):
alt_db_opt.help = db_opt.help + alt_db_opt.help
# NOTE(cdent): See the note above on api_db_group. The same issues
# apply here.
placement_db_group = cfg.OptGroup('placement_database',
title='Placement API database options',
help="""
The *Placement API Database* is a separate database which can be used with the
placement service. This database is optional: if the connection option is not
set, the nova api database will be used instead.
""")
placement_db_opts = [
cfg.StrOpt('connection',
help='',
secret=True),
cfg.StrOpt('connection_parameters',
default='',
help=''),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help=''),
cfg.StrOpt('slave_connection',
secret=True,
help=''),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help=''),
cfg.IntOpt('connection_recycle_time',
default=3600,
help=''),
cfg.IntOpt('max_pool_size',
help=''),
cfg.IntOpt('max_retries',
default=10,
help=''),
cfg.IntOpt('retry_interval',
default=10,
help=''),
cfg.IntOpt('max_overflow',
help=''),
cfg.IntOpt('connection_debug',
default=0,
help=''),
cfg.BoolOpt('connection_trace',
default=False,
help=''),
cfg.IntOpt('pool_timeout',
help=''),
] # noqa
def register_opts(conf):
oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION)
conf.register_opts(api_db_opts, group=api_db_group)
conf.register_opts(placement_db_opts, group=placement_db_group)
def list_opts():
@ -174,9 +122,7 @@ def list_opts():
global _ENRICHED
if not _ENRICHED:
enrich_help_text(api_db_opts)
enrich_help_text(placement_db_opts)
_ENRICHED = True
return {
api_db_group: api_db_opts,
placement_db_group: placement_db_opts,
}

View File

@ -17,80 +17,22 @@ from nova.conf import utils as confutils
DEFAULT_SERVICE_TYPE = 'placement'
DEFAULT_CONSUMER_MISSING_ID = '00000000-0000-0000-0000-000000000000'
placement_group = cfg.OptGroup(
'placement',
title='Placement Service Options',
help="Configuration options for connecting to the placement API service")
placement_opts = [
cfg.BoolOpt(
'randomize_allocation_candidates',
default=False,
help="""
If True, when limiting allocation candidate results, the results will be
a random sampling of the full result set. If False, allocation candidates
are returned in a deterministic but undefined order. That is, all things
being equal, two requests for allocation candidates will return the same
results in the same order; but no guarantees are made as to how that order
is determined.
"""),
# TODO(mriedem): When placement is split out of nova, this should be
# deprecated since then [oslo_policy]/policy_file can be used.
cfg.StrOpt(
'policy_file',
# This default matches what is in
# etc/nova/placement-policy-generator.conf
default='placement-policy.yaml',
help='The file that defines placement policies. This can be an '
'absolute path or relative to the configuration file.'),
cfg.StrOpt(
'incomplete_consumer_project_id',
default=DEFAULT_CONSUMER_MISSING_ID,
help="""
Early API microversions (<1.8) allowed creating allocations and not specifying
a project or user identifier for the consumer. In cleaning up the data
modeling, we no longer allow missing project and user information. If an older
client makes an allocation, we'll use this in place of the information it
doesn't provide.
"""),
cfg.StrOpt(
'incomplete_consumer_user_id',
default=DEFAULT_CONSUMER_MISSING_ID,
help="""
Early API microversions (<1.8) allowed creating allocations and not specifying
a project or user identifier for the consumer. In cleaning up the data
modeling, we no longer allow missing project and user information. If an older
client makes an allocation, we'll use this in place of the information it
doesn't provide.
"""),
]
# Duplicate log_options from oslo_service so that we don't have to import
# that package into placement.
# NOTE(cdent): Doing so ends up requiring eventlet and other unnecessary
# packages for just this one setting.
service_opts = [
cfg.BoolOpt('log_options',
default=True,
help='Enables or disables logging values of all registered '
'options when starting a service (at DEBUG level).'),
]
def register_opts(conf):
conf.register_group(placement_group)
conf.register_opts(placement_opts, group=placement_group)
conf.register_opts(service_opts)
confutils.register_ksa_opts(conf, placement_group, DEFAULT_SERVICE_TYPE)
def list_opts():
return {
placement_group.name: (
placement_opts +
ks_loading.get_session_conf_options() +
ks_loading.get_auth_common_conf_options() +
ks_loading.get_auth_plugin_conf_options('password') +

View File

@ -18,7 +18,6 @@
from oslo_log import log
from oslo_utils import importutils
from nova.api.openstack.placement import db_api as placement_db
from nova.common import config
import nova.conf
from nova.db.sqlalchemy import api as sqlalchemy_api
@ -62,4 +61,3 @@ def parse_args(argv, default_config_files=None, configure_db=True,
if configure_db:
sqlalchemy_api.configure(CONF)
placement_db.configure(CONF)

View File

@ -24,7 +24,6 @@ from oslo_log import log as logging
import sqlalchemy
from sqlalchemy.sql import null
from nova.api.openstack.placement import db_api as placement_db
from nova.db.sqlalchemy import api as db_session
from nova import exception
from nova.i18n import _
@ -32,7 +31,6 @@ from nova.i18n import _
INIT_VERSION = {}
INIT_VERSION['main'] = 215
INIT_VERSION['api'] = 0
INIT_VERSION['placement'] = 0
_REPOSITORY = {}
LOG = logging.getLogger(__name__)
@ -43,8 +41,6 @@ def get_engine(database='main', context=None):
return db_session.get_engine(context=context)
if database == 'api':
return db_session.get_api_engine()
if database == 'placement':
return placement_db.get_placement_engine()
def db_sync(version=None, database='main', context=None):
@ -173,10 +169,7 @@ def _find_migrate_repo(database='main'):
"""Get the path for the migrate repository."""
global _REPOSITORY
rel_path = 'migrate_repo'
if database == 'api' or database == 'placement':
# NOTE(cdent): For the time being the placement database (if
# it is being used) is a replica (in structure) of the api
# database.
if database == 'api':
rel_path = os.path.join('api_migrations', 'migrate_repo')
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
rel_path)

View File

@ -626,15 +626,12 @@ def check_config_option_in_central_place(logical_line, filename):
def check_policy_registration_in_central_place(logical_line, filename):
msg = ('N350: Policy registration should be in the central location(s) '
'"/nova/policies/*" or "nova/api/openstack/placement/policies/*".')
'"/nova/policies/*"')
# This is where registration should happen
if ("nova/policies/" in filename or
"nova/api/openstack/placement/policies/" in filename):
if "nova/policies/" in filename:
return
# A couple of policy tests register rules
if ("nova/tests/unit/test_policy.py" in filename or
"nova/tests/unit/api/openstack/placement/test_policy.py" in
filename):
if "nova/tests/unit/test_policy.py" in filename:
return
if rule_default_re.match(logical_line):

View File

@ -1,70 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Standard Resource Class Fields."""
# NOTE(cdent): This file is only used by the placement code within
# nova. Other uses of resource classes in nova make use of the
# os-resource-classes library. The placement code within nova
# continues to use this so that that code can remain unchanged.
import re
from oslo_versionedobjects import fields
class ResourceClass(fields.StringField):
"""Classes of resources provided to consumers."""
CUSTOM_NAMESPACE = 'CUSTOM_'
"""All non-standard resource classes must begin with this string."""
VCPU = 'VCPU'
MEMORY_MB = 'MEMORY_MB'
DISK_GB = 'DISK_GB'
PCI_DEVICE = 'PCI_DEVICE'
SRIOV_NET_VF = 'SRIOV_NET_VF'
NUMA_SOCKET = 'NUMA_SOCKET'
NUMA_CORE = 'NUMA_CORE'
NUMA_THREAD = 'NUMA_THREAD'
NUMA_MEMORY_MB = 'NUMA_MEMORY_MB'
IPV4_ADDRESS = 'IPV4_ADDRESS'
VGPU = 'VGPU'
VGPU_DISPLAY_HEAD = 'VGPU_DISPLAY_HEAD'
# Standard resource class for network bandwidth egress measured in
# kilobits per second.
NET_BW_EGR_KILOBIT_PER_SEC = 'NET_BW_EGR_KILOBIT_PER_SEC'
# Standard resource class for network bandwidth ingress measured in
# kilobits per second.
NET_BW_IGR_KILOBIT_PER_SEC = 'NET_BW_IGR_KILOBIT_PER_SEC'
# The ordering here is relevant. If you must add a value, only
# append.
STANDARD = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF,
NUMA_SOCKET, NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB,
IPV4_ADDRESS, VGPU, VGPU_DISPLAY_HEAD,
NET_BW_EGR_KILOBIT_PER_SEC, NET_BW_IGR_KILOBIT_PER_SEC)
@classmethod
def normalize_name(cls, rc_name):
if rc_name is None:
return None
# Replace non-alphanumeric characters with underscores
norm_name = re.sub('[^0-9A-Za-z]+', '_', rc_name)
# Bug #1762789: Do .upper after replacing non alphanumerics.
norm_name = norm_name.upper()
norm_name = cls.CUSTOM_NAMESPACE + norm_name
return norm_name
class ResourceClassField(fields.AutoTypedField):
AUTO_TYPE = ResourceClass()
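# Illustrative only (editor's sketch): normalize_name turns an arbitrary
# string into a custom resource class name, e.g.
#
#   ResourceClass.normalize_name('gold tier')   # -> 'CUSTOM_GOLD_TIER'
#   ResourceClass.normalize_name('x86-avx2')    # -> 'CUSTOM_X86_AVX2'
#   ResourceClass.normalize_name(None)          # -> None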

View File

@ -24,6 +24,7 @@ from nova import test
from nova.tests.functional import integrated_helpers
CONF = config.CONF
INCOMPLETE_CONSUMER_ID = '00000000-0000-0000-0000-000000000000'
class NovaManageDBIronicTest(test.TestCase):
@ -626,10 +627,8 @@ class TestNovaManagePlacementHealAllocations(
# the project_id and user_id are based on the sentinel values.
allocations = self.placement_api.get(
'/allocations/%s' % server['id'], version='1.12').body
self.assertEqual(CONF.placement.incomplete_consumer_project_id,
allocations['project_id'])
self.assertEqual(CONF.placement.incomplete_consumer_user_id,
allocations['user_id'])
self.assertEqual(INCOMPLETE_CONSUMER_ID, allocations['project_id'])
self.assertEqual(INCOMPLETE_CONSUMER_ID, allocations['user_id'])
allocations = allocations['allocations']
self.assertIn(rp_uuid, allocations)
self.assertFlavorMatchesAllocation(self.flavor, server['id'], rp_uuid)

View File

@ -18,7 +18,6 @@ import fixtures
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from nova.api.openstack.placement import policy as placement_policy
import nova.conf
from nova.conf import paths
from nova import policies
@ -127,32 +126,3 @@ class RoleBasedPolicyFixture(RealPolicyFixture):
self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy, f)
class PlacementPolicyFixture(fixtures.Fixture):
"""Load the default placement policy for tests.
This fixture requires nova.tests.unit.conf_fixture.ConfFixture.
"""
def setUp(self):
super(PlacementPolicyFixture, self).setUp()
policy_file = paths.state_path_def('etc/nova/placement-policy.yaml')
CONF.set_override('policy_file', policy_file, group='placement')
placement_policy.reset()
placement_policy.init()
self.addCleanup(placement_policy.reset)
@staticmethod
def set_rules(rules, overwrite=True):
"""Set placement policy rules.
.. note:: The rules must first be registered via the
Enforcer.register_defaults method.
:param rules: dict of action=rule mappings to set
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
enforcer = placement_policy.get_enforcer()
enforcer.set_rules(oslo_policy.Rules.from_dict(rules),
overwrite=overwrite)

View File

@ -91,9 +91,6 @@ class TestParseArgs(test.NoDBTestCase):
m = mock.patch('nova.db.sqlalchemy.api.configure')
self.nova_db_config_mock = m.start()
self.addCleanup(self.nova_db_config_mock.stop)
m = mock.patch('nova.api.openstack.placement.db_api.configure')
self.placement_db_config_mock = m.start()
self.addCleanup(self.placement_db_config_mock.stop)
@mock.patch.object(config.log, 'register_options')
def test_parse_args_glance_debug_false(self, register_options):
@ -101,7 +98,6 @@ class TestParseArgs(test.NoDBTestCase):
config.parse_args([], configure_db=False, init_rpc=False)
self.assertIn('glanceclient=WARN', config.CONF.default_log_levels)
self.nova_db_config_mock.assert_not_called()
self.placement_db_config_mock.assert_not_called()
@mock.patch.object(config.log, 'register_options')
def test_parse_args_glance_debug_true(self, register_options):
@ -109,4 +105,3 @@ class TestParseArgs(test.NoDBTestCase):
config.parse_args([], configure_db=True, init_rpc=False)
self.assertIn('glanceclient=DEBUG', config.CONF.default_log_levels)
self.nova_db_config_mock.assert_called_once_with(config.CONF)
self.placement_db_config_mock.assert_called_once_with(config.CONF)

View File

@ -2562,7 +2562,7 @@ class TestNovaManagePlacement(test.NoDBTestCase):
new_callable=mock.NonCallableMock) # assert not called
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put',
return_value=fake_requests.FakeResponse(204))
def test_heal_allocations_sentinel_consumer(
def test_heal_allocations(
self, mock_put, mock_get_compute_node, mock_get_allocs,
mock_get_instances, mock_get_all_cells):
"""Tests the scenario that there are allocations created using
@ -2584,8 +2584,8 @@ class TestNovaManagePlacement(test.NoDBTestCase):
}
}
},
"project_id": CONF.placement.incomplete_consumer_project_id,
"user_id": CONF.placement.incomplete_consumer_user_id
"project_id": uuidsentinel.project_id,
"user_id": uuidsentinel.user_id
}
self.assertEqual(0, self.cli.heal_allocations(verbose=True))
self.assertIn('Processed 1 instances.', self.output.getvalue())
@ -2614,7 +2614,7 @@ class TestNovaManagePlacement(test.NoDBTestCase):
return_value=fake_requests.FakeResponse(
409, content='Inventory and/or allocations changed while '
'attempting to allocate'))
def test_heal_allocations_sentinel_consumer_put_fails(
def test_heal_allocations_put_fails(
self, mock_put, mock_get_allocs, mock_get_instances,
mock_get_all_cells):
"""Tests the scenario that there are allocations created using
@ -2634,8 +2634,8 @@ class TestNovaManagePlacement(test.NoDBTestCase):
}
}
},
"project_id": CONF.placement.incomplete_consumer_project_id,
"user_id": CONF.placement.incomplete_consumer_user_id
"project_id": uuidsentinel.project_id,
"user_id": uuidsentinel.user_id
}
self.assertEqual(3, self.cli.heal_allocations(verbose=True))
self.assertIn(

View File

@ -0,0 +1,13 @@
---
other:
- |
The code for the `placement service
<https://docs.openstack.org/placement>`_ was moved to its own
`repository <https://git.openstack.org/cgit/openstack/placement>`_ in
Stein. The placement code in nova has been deleted.
upgrade:
- |
If you upgraded your OpenStack deployment to Stein without switching to use
the now independent placement service, you must do so before upgrading to
Train. `Instructions <https://docs.openstack.org/placement/latest/upgrade/to-stein.html>`_
for one way to do this are available.

View File

@ -40,7 +40,6 @@ oslo.config.opts.defaults =
oslo.policy.enforcer =
nova = nova.policy:get_enforcer
placement = nova.api.openstack.placement.policy:get_enforcer
oslo.policy.policies =
# The sample policies will be ordered by entry point and then by list
@ -48,7 +47,6 @@ oslo.policy.policies =
# list_rules method into a separate entry point rather than using the
# aggregate method.
nova = nova.policies:list_rules
placement = nova.api.openstack.placement.policies:list_rules
nova.compute.monitors.cpu =
virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor
@ -74,7 +72,6 @@ console_scripts =
nova-status = nova.cmd.status:main
nova-xvpvncproxy = nova.cmd.xvpvncproxy:main
wsgi_scripts =
nova-placement-api = nova.api.openstack.placement.wsgi:init_application
nova-api-wsgi = nova.api.openstack.compute.wsgi:init_application
nova-metadata-wsgi = nova.api.metadata.wsgi:init_application

View File

@ -93,7 +93,7 @@ commands =
# special way. See the following for more details.
# http://stestr.readthedocs.io/en/latest/MANUAL.html#grouping-tests
# https://gabbi.readthedocs.io/en/latest/#purpose
stestr --test-path=./nova/tests/functional --group_regex=nova\.tests\.functional\.api\.openstack\.placement\.test_placement_api(?:\.|_)([^_]+) run {posargs}
stestr --test-path=./nova/tests/functional run {posargs}
stestr slowest
# TODO(gcb) Merge this into [testenv:functional] when functional tests are gating
@ -132,11 +132,6 @@ envdir = {toxworkdir}/shared
commands =
oslopolicy-sample-generator --config-file=etc/nova/nova-policy-generator.conf
[testenv:genplacementpolicy]
envdir = {toxworkdir}/shared
commands =
oslopolicy-sample-generator --config-file=etc/nova/placement-policy-generator.conf
[testenv:cover]
# TODO(stephenfin): Remove the PYTHON hack below in favour of a [coverage]
# section once we rely on coverage 4.3+