diff --git a/.zuul.yaml b/.zuul.yaml
index 38249544e19e..585b1cf612b4 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -48,6 +48,8 @@
       Run tox-based functional tests for the OpenStack Nova project
       with Nova specific irrelevant-files list. Uses tox with the
       ``functional`` environment.
+    required-projects:
+      - openstack/placement
     irrelevant-files: &functional-irrelevant-files
       - ^.*\.rst$
      - ^api-.*$
@@ -56,6 +58,7 @@
       - ^releasenotes/.*$
     vars:
       tox_envlist: functional
+      tox_install_siblings: true
     timeout: 3600
 
 - job:
@@ -65,9 +68,12 @@
       Run tox-based functional tests for the OpenStack Nova project
       under cPython version 3.5. with Nova specific irrelevant-files list.
       Uses tox with the ``functional-py35`` environment.
+    required-projects:
+      - openstack/placement
     irrelevant-files: *functional-irrelevant-files
     vars:
       tox_envlist: functional-py35
+      tox_install_siblings: true
     timeout: 3600
 
 - job:
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index 6e53d27af004..eb2ea0076999 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -45,6 +45,7 @@ import six
 import six.moves.urllib.parse as urlparse
 from sqlalchemy.engine import url as sqla_url
 
+# FIXME(cdent): This is a speedbump in the extraction process
 from nova.api.openstack.placement.objects import consumer as consumer_obj
 from nova.cmd import common as cmd_common
 from nova.compute import api as compute_api
@@ -416,6 +417,7 @@ class DbCommands(object):
         # need to be populated if it was not specified during boot time.
         instance_obj.populate_missing_availability_zones,
         # Added in Rocky
+        # FIXME(cdent): This is a factor that needs to be addressed somehow
         consumer_obj.create_incomplete_consumers,
         # Added in Rocky
         instance_mapping_obj.populate_queued_for_delete,
@@ -1987,6 +1989,7 @@ class PlacementCommands(object):
 
         return num_processed
 
+    # FIXME(cdent): This needs to be addressed as part of extraction.
     @action_description(
         _("Iterates over non-cell0 cells looking for instances which do "
           "not have allocations in the Placement service, or have incomplete "
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 091fa3918707..c5e4f43fbc16 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -251,6 +251,7 @@ class UpgradeCommands(object):
         # and resource class, so we can simply count the number of inventories
         # records for the given resource class and those will uniquely identify
         # the number of resource providers we care about.
+        # FIXME(cdent): This will be a different project soon.
         meta = MetaData(bind=placement_db.get_placement_engine())
         inventories = Table('inventories', meta, autoload=True)
         return select([sqlfunc.count()]).select_from(
diff --git a/nova/test.py b/nova/test.py
index ea7fa2d04779..cb6302c403ad 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -49,7 +49,6 @@ from oslotest import moxstubout
 import six
 import testtools
 
-from nova.api.openstack.placement.objects import resource_provider
 from nova import context
 from nova.db import api as db
 from nova import exception
@@ -260,7 +259,6 @@ class TestCase(testtools.TestCase):
             # NOTE(danms): Full database setup involves a cell0, cell1,
             # and the relevant mappings.
             self.useFixture(nova_fixtures.Database(database='api'))
-            self.useFixture(nova_fixtures.Database(database='placement'))
             self._setup_cells()
             self.useFixture(nova_fixtures.DefaultFlavorsFixture())
         elif not self.USES_DB_SELF:
@@ -281,12 +279,6 @@ class TestCase(testtools.TestCase):
         # caching of that value.
         utils._IS_NEUTRON = None
-        # Reset the traits sync and rc cache flags
-        def _reset_traits():
-            resource_provider._TRAITS_SYNCED = False
-        _reset_traits()
-        self.addCleanup(_reset_traits)
-        resource_provider._RC_CACHE = None
 
         # Reset the global QEMU version flag.
         images.QEMU_VERSION = None
 
@@ -296,8 +288,6 @@ class TestCase(testtools.TestCase):
         self.addCleanup(self._clear_attrs)
         self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
         self.policy = self.useFixture(policy_fixture.PolicyFixture())
-        self.placement_policy = self.useFixture(
-            policy_fixture.PlacementPolicyFixture())
 
         self.useFixture(nova_fixtures.PoisonFunctions())
 
diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index 8459a4830f8a..3f062b36aac1 100644
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -26,8 +26,6 @@ import random
 import warnings
 
 import fixtures
-from keystoneauth1 import adapter as ka
-from keystoneauth1 import session as ks
 import mock
 from neutronclient.common import exceptions as neutron_client_exc
 from oslo_concurrency import lockutils
@@ -41,7 +39,6 @@ from requests import adapters
 from wsgi_intercept import interceptor
 
 from nova.api.openstack.compute import tenant_networks
-from nova.api.openstack.placement import db_api as placement_db
 from nova.api.openstack import wsgi_app
 from nova.api import wsgi
 from nova.compute import rpcapi as compute_rpcapi
@@ -57,12 +54,11 @@ from nova import quota as nova_quota
 from nova import rpc
 from nova import service
 from nova.tests.functional.api import client
-from nova.tests.functional.api.openstack.placement.fixtures import placement
 
 
 _TRUE_VALUES = ('True', 'true', '1', 'yes')
 
 CONF = cfg.CONF
-DB_SCHEMA = {'main': "", 'api': "", 'placement': ""}
+DB_SCHEMA = {'main': "", 'api': ""}
 SESSION_CONFIGURED = False
 
@@ -631,7 +627,7 @@ class Database(fixtures.Fixture):
     def __init__(self, database='main', connection=None):
         """Create a database fixture.
 
-        :param database: The type of database, 'main', 'api' or 'placement'
+        :param database: The type of database, 'main' or 'api'
         :param connection: The connection string to use
         """
         super(Database, self).__init__()
@@ -640,7 +636,6 @@ class Database(fixtures.Fixture):
         global SESSION_CONFIGURED
         if not SESSION_CONFIGURED:
             session.configure(CONF)
-            placement_db.configure(CONF)
             SESSION_CONFIGURED = True
         self.database = database
         if database == 'main':
@@ -652,8 +647,6 @@ class Database(fixtures.Fixture):
             self.get_engine = session.get_engine
         elif database == 'api':
             self.get_engine = session.get_api_engine
-        elif database == 'placement':
-            self.get_engine = placement_db.get_placement_engine
 
     def _cache_schema(self):
         global DB_SCHEMA
@@ -687,7 +680,7 @@ class DatabaseAtVersion(fixtures.Fixture):
         """Create a database fixture.
 
         :param version: Max version to sync to (or None for current)
-        :param database: The type of database, 'main', 'api', 'placement'
+        :param database: The type of database, 'main' or 'api'
         """
         super(DatabaseAtVersion, self).__init__()
         self.database = database
@@ -696,8 +689,6 @@
             self.get_engine = session.get_engine
         elif database == 'api':
             self.get_engine = session.get_api_engine
-        elif database == 'placement':
-            self.get_engine = placement_db.get_placement_engine
 
     def cleanup(self):
         engine = self.get_engine()
@@ -1853,136 +1844,6 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
                         fake_get_all_volume_types)
 
 
-class PlacementApiClient(object):
-    def __init__(self, placement_fixture):
-        self.fixture = placement_fixture
-
-    def get(self, url, **kwargs):
-        return client.APIResponse(self.fixture._fake_get(None, url, **kwargs))
-
-    def put(self, url, body, **kwargs):
-        return client.APIResponse(
-            self.fixture._fake_put(None, url, body, **kwargs))
-
-    def post(self, url, body, **kwargs):
-        return client.APIResponse(
-            self.fixture._fake_post(None, url, body, **kwargs))
-
-
-class PlacementFixture(placement.PlacementFixture):
-    """A fixture to placement operations.
-
-    Runs a local WSGI server bound on a free port and having the Placement
-    application with NoAuth middleware.
-    This fixture also prevents calling the ServiceCatalog for getting the
-    endpoint.
-
-    It's possible to ask for a specific token when running the fixtures so
-    all calls would be passing this token.
-
-    Most of the time users of this fixture will also want the placement
-    database fixture (called first) as well:
-
-        self.useFixture(nova_fixtures.Database(database='placement'))
-
-    That is left as a manual step so tests may have fine grain control, and
-    because it is likely that these fixtures will continue to evolve as
-    the separation of nova and placement continues.
-    """
-
-    def setUp(self):
-        super(PlacementFixture, self).setUp()
-
-        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
-        # to keep wsgi-intercept happy. Replace it with the method
-        # from its superclass.
-        self.useFixture(fixtures.MonkeyPatch(
-            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
-            adapters.HTTPAdapter.init_poolmanager))
-
-        self._client = ka.Adapter(ks.Session(auth=None), raise_exc=False)
-        # NOTE(sbauza): We need to mock the scheduler report client because
-        # we need to fake Keystone by directly calling the endpoint instead
-        # of looking up the service catalog, like we did for the OSAPIFixture.
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.get',
-            self._fake_get))
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.post',
-            self._fake_post))
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.put',
-            self._fake_put))
-        self.useFixture(fixtures.MonkeyPatch(
-            'nova.scheduler.client.report.SchedulerReportClient.delete',
-            self._fake_delete))
-
-        self.api = PlacementApiClient(self)
-
-    @staticmethod
-    def _update_headers_with_version(headers, **kwargs):
-        version = kwargs.get("version")
-        if version is not None:
-            # TODO(mriedem): Perform some version discovery at some point.
-            headers.update({
-                'OpenStack-API-Version': 'placement %s' % version
-            })
-
-    def _fake_get(self, *args, **kwargs):
-        (url,) = args[1:]
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        headers = {'x-auth-token': self.token}
-        self._update_headers_with_version(headers, **kwargs)
-        return self._client.get(
-            url,
-            endpoint_override=self.endpoint,
-            headers=headers)
-
-    def _fake_post(self, *args, **kwargs):
-        (url, data) = args[1:]
-        # NOTE(sdague): using json= instead of data= sets the
-        # media type to application/json for us. Placement API is
-        # more sensitive to this than other APIs in the OpenStack
-        # ecosystem.
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        headers = {'x-auth-token': self.token}
-        self._update_headers_with_version(headers, **kwargs)
-        return self._client.post(
-            url, json=data,
-            endpoint_override=self.endpoint,
-            headers=headers)
-
-    def _fake_put(self, *args, **kwargs):
-        (url, data) = args[1:]
-        # NOTE(sdague): using json= instead of data= sets the
-        # media type to application/json for us. Placement API is
-        # more sensitive to this than other APIs in the OpenStack
-        # ecosystem.
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        headers = {'x-auth-token': self.token}
-        self._update_headers_with_version(headers, **kwargs)
-        return self._client.put(
-            url, json=data,
-            endpoint_override=self.endpoint,
-            headers=headers)
-
-    def _fake_delete(self, *args, **kwargs):
-        (url,) = args[1:]
-        # TODO(sbauza): The current placement NoAuthMiddleware returns a 401
-        # in case a token is not provided. We should change that by creating
-        # a fake token so we could remove adding the header below.
-        return self._client.delete(
-            url,
-            endpoint_override=self.endpoint,
-            headers={'x-auth-token': self.token})
-
-
 class UnHelperfulClientChannel(privsep_daemon._ClientChannel):
     def __init__(self, context):
         raise Exception('You have attempted to start a privsep helper. '
diff --git a/nova/tests/functional/api/openstack/placement/__init__.py b/nova/tests/functional/api/openstack/placement/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/nova/tests/functional/api/openstack/placement/base.py b/nova/tests/functional/api/openstack/placement/base.py
deleted file mode 100644
index 14164fb70a5a..000000000000
--- a/nova/tests/functional/api/openstack/placement/base.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_config import fixture as config_fixture
-from oslotest import output
-import testtools
-
-from nova.api.openstack.placement import context
-from nova.api.openstack.placement import deploy
-from nova.api.openstack.placement.objects import resource_provider
-from nova.tests import fixtures
-from nova.tests.functional.api.openstack.placement.fixtures import capture
-from nova.tests.unit import policy_fixture
-
-
-CONF = cfg.CONF
-
-
-class TestCase(testtools.TestCase):
-    """A base test case for placement functional tests.
-
-    Sets up minimum configuration for database and policy handling
-    and establishes the placement database.
-    """
-
-    def setUp(self):
-        super(TestCase, self).setUp()
-
-        # Manage required configuration
-        conf_fixture = self.useFixture(config_fixture.Config(CONF))
-        # The Database fixture will get confused if only one of the databases
-        # is configured.
-        for group in ('placement_database', 'api_database', 'database'):
-            conf_fixture.config(
-                group=group,
-                connection='sqlite://',
-                sqlite_synchronous=False)
-        CONF([], default_config_files=[])
-
-        self.useFixture(policy_fixture.PlacementPolicyFixture())
-
-        self.useFixture(capture.Logging())
-        self.useFixture(output.CaptureOutput())
-        # Filter ignorable warnings during test runs.
-        self.useFixture(capture.WarningsFixture())
-
-        self.placement_db = self.useFixture(
-            fixtures.Database(database='placement'))
-        self._reset_database()
-        self.context = context.RequestContext()
-        # Do database syncs, such as traits sync.
-        deploy.update_database()
-        self.addCleanup(self._reset_database)
-
-    @staticmethod
-    def _reset_database():
-        """Reset database sync flags to base state."""
-        resource_provider._TRAITS_SYNCED = False
-        resource_provider._RC_CACHE = None
diff --git a/nova/tests/functional/api/openstack/placement/db/__init__.py b/nova/tests/functional/api/openstack/placement/db/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py b/nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py
deleted file mode 100644
index 8903fedb1219..000000000000
--- a/nova/tests/functional/api/openstack/placement/db/test_allocation_candidates.py
+++ /dev/null
@@ -1,2800 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os_traits
-from oslo_config import cfg
-from oslo_utils.fixture import uuidsentinel as uuids
-import six
-import sqlalchemy as sa
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import lib as placement_lib
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova import rc_fields as fields
-from nova.tests.functional.api.openstack.placement.db import test_base as tb
-
-
-CONF = cfg.CONF
-
-
-class ProviderDBHelperTestCase(tb.PlacementDbBaseTestCase):
-
-    def test_get_provider_ids_matching(self):
-        # These RPs are named based on whether we expect them to be 'incl'uded
-        # or 'excl'uded in the result.
-
-        # No inventory records. This one should never show up in a result.
-        self._create_provider('no_inventory')
-
-        # Inventory of adequate CPU and memory, no allocations against it.
-        excl_big_cm_noalloc = self._create_provider('big_cm_noalloc')
-        tb.add_inventory(excl_big_cm_noalloc, fields.ResourceClass.VCPU, 15)
-        tb.add_inventory(excl_big_cm_noalloc, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048)
-
-        # Inventory of adequate memory and disk, no allocations against it.
-        excl_big_md_noalloc = self._create_provider('big_md_noalloc')
-        tb.add_inventory(excl_big_md_noalloc, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048)
-        tb.add_inventory(excl_big_md_noalloc, fields.ResourceClass.DISK_GB,
-                         2000)
-
-        # Adequate inventory, no allocations against it.
-        incl_biginv_noalloc = self._create_provider('biginv_noalloc')
-        tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.VCPU, 15)
-        tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048)
-        tb.add_inventory(incl_biginv_noalloc, fields.ResourceClass.DISK_GB,
-                         2000)
-
-        # No allocations, but inventory unusable. Try to hit all the possible
-        # reasons for exclusion.
-        # VCPU min_unit too high
-        excl_badinv_min_unit = self._create_provider('badinv_min_unit')
-        tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.VCPU, 12,
-                         min_unit=6)
-        tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048)
-        tb.add_inventory(excl_badinv_min_unit, fields.ResourceClass.DISK_GB,
-                         2000)
-        # MEMORY_MB max_unit too low
-        excl_badinv_max_unit = self._create_provider('badinv_max_unit')
-        tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.VCPU, 15)
-        tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=512)
-        tb.add_inventory(excl_badinv_max_unit, fields.ResourceClass.DISK_GB,
-                         2000)
-        # DISK_GB unsuitable step_size
-        excl_badinv_step_size = self._create_provider('badinv_step_size')
-        tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.VCPU, 15)
-        tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048)
-        tb.add_inventory(excl_badinv_step_size, fields.ResourceClass.DISK_GB,
-                         2000, step_size=7)
-        # Not enough total VCPU
-        excl_badinv_total = self._create_provider('badinv_total')
-        tb.add_inventory(excl_badinv_total, fields.ResourceClass.VCPU, 4)
-        tb.add_inventory(excl_badinv_total, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048)
-        tb.add_inventory(excl_badinv_total, fields.ResourceClass.DISK_GB, 2000)
-        # Too much reserved MEMORY_MB
-        excl_badinv_reserved = self._create_provider('badinv_reserved')
-        tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.VCPU, 15)
-        tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048, reserved=3500)
-        tb.add_inventory(excl_badinv_reserved, fields.ResourceClass.DISK_GB,
-                         2000)
-        # DISK_GB allocation ratio blows it up
-        excl_badinv_alloc_ratio = self._create_provider('badinv_alloc_ratio')
-        tb.add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.VCPU,
-                         15)
-        tb.add_inventory(excl_badinv_alloc_ratio,
-                         fields.ResourceClass.MEMORY_MB, 4096, max_unit=2048)
-        tb.add_inventory(excl_badinv_alloc_ratio, fields.ResourceClass.DISK_GB,
-                         2000, allocation_ratio=0.5)
-
-        # Inventory consumed in one RC, but available in the others
-        excl_1invunavail = self._create_provider('1invunavail')
-        tb.add_inventory(excl_1invunavail, fields.ResourceClass.VCPU, 10)
-        self.allocate_from_provider(
-            excl_1invunavail, fields.ResourceClass.VCPU, 7)
-        tb.add_inventory(excl_1invunavail, fields.ResourceClass.MEMORY_MB,
-                         4096)
-        self.allocate_from_provider(excl_1invunavail,
-                                    fields.ResourceClass.MEMORY_MB, 1024)
-        tb.add_inventory(excl_1invunavail, fields.ResourceClass.DISK_GB, 2000)
-        self.allocate_from_provider(excl_1invunavail,
-                                    fields.ResourceClass.DISK_GB, 400)
-
-        # Inventory all consumed
-        excl_allused = self._create_provider('allused')
-        tb.add_inventory(excl_allused, fields.ResourceClass.VCPU, 10)
-        self.allocate_from_provider(excl_allused, fields.ResourceClass.VCPU, 7)
-        tb.add_inventory(excl_allused, fields.ResourceClass.MEMORY_MB, 4000)
-        self.allocate_from_provider(excl_allused,
-                                    fields.ResourceClass.MEMORY_MB, 1500)
-        self.allocate_from_provider(excl_allused,
-                                    fields.ResourceClass.MEMORY_MB, 2000)
-        tb.add_inventory(excl_allused, fields.ResourceClass.DISK_GB, 1500)
-        self.allocate_from_provider(excl_allused, fields.ResourceClass.DISK_GB,
-                                    1)
-
-        # Inventory available in requested classes, but unavailable in others
-        incl_extra_full = self._create_provider('extra_full')
-        tb.add_inventory(incl_extra_full, fields.ResourceClass.VCPU, 20)
-        self.allocate_from_provider(incl_extra_full, fields.ResourceClass.VCPU,
-                                    15)
-        tb.add_inventory(incl_extra_full, fields.ResourceClass.MEMORY_MB, 4096)
-        self.allocate_from_provider(incl_extra_full,
-                                    fields.ResourceClass.MEMORY_MB, 1024)
-        tb.add_inventory(incl_extra_full, fields.ResourceClass.DISK_GB, 2000)
-        self.allocate_from_provider(incl_extra_full,
-                                    fields.ResourceClass.DISK_GB, 400)
-        tb.add_inventory(incl_extra_full, fields.ResourceClass.PCI_DEVICE, 4)
-        self.allocate_from_provider(incl_extra_full,
-                                    fields.ResourceClass.PCI_DEVICE, 1)
-        self.allocate_from_provider(incl_extra_full,
-                                    fields.ResourceClass.PCI_DEVICE, 3)
-
-        # Inventory available in a unrequested classes, not in requested ones
-        excl_extra_avail = self._create_provider('extra_avail')
-        # Incompatible step size
-        tb.add_inventory(excl_extra_avail, fields.ResourceClass.VCPU, 10,
-                         step_size=3)
-        # Not enough left after reserved + used
-        tb.add_inventory(excl_extra_avail, fields.ResourceClass.MEMORY_MB,
-                         4096, max_unit=2048, reserved=2048)
-        self.allocate_from_provider(excl_extra_avail,
-                                    fields.ResourceClass.MEMORY_MB, 1040)
-        # Allocation ratio math
-        tb.add_inventory(excl_extra_avail, fields.ResourceClass.DISK_GB, 2000,
-                         allocation_ratio=0.5)
-        tb.add_inventory(excl_extra_avail, fields.ResourceClass.IPV4_ADDRESS,
-                         48)
-        custom_special = rp_obj.ResourceClass(self.ctx, name='CUSTOM_SPECIAL')
-        custom_special.create()
-        tb.add_inventory(excl_extra_avail, 'CUSTOM_SPECIAL', 100)
-        self.allocate_from_provider(excl_extra_avail, 'CUSTOM_SPECIAL', 99)
-
-        resources = {
-            fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 5,
-            fields.ResourceClass.STANDARD.index(
-                fields.ResourceClass.MEMORY_MB): 1024,
-            fields.ResourceClass.STANDARD.index(
-                fields.ResourceClass.DISK_GB): 1500
-        }
-
-        # Run it!
-        res = rp_obj._get_provider_ids_matching(self.ctx, resources, {}, {})
-
-        # We should get all the incl_* RPs
-        expected = [incl_biginv_noalloc, incl_extra_full]
-
-        self.assertEqual(set((rp.id, rp.id) for rp in expected), set(res))
-
-        # Now request that the providers must have a set of required traits and
-        # that this results in no results returned, since we haven't yet
-        # associated any traits with the providers
-        avx2_t = rp_obj.Trait.get_by_name(self.ctx, os_traits.HW_CPU_X86_AVX2)
-        # _get_provider_ids_matching()'s required_traits and forbidden_traits
-        # arguments maps, keyed by trait name, of the trait internal ID
-        req_traits = {os_traits.HW_CPU_X86_AVX2: avx2_t.id}
-        res = rp_obj._get_provider_ids_matching(self.ctx, resources,
-                                                req_traits, {})
-
-        self.assertEqual([], res)
-
-        # Next let's set the required trait to an excl_* RPs.
-        # This should result in no results returned as well.
-        excl_big_md_noalloc.set_traits([avx2_t])
-        res = rp_obj._get_provider_ids_matching(self.ctx, resources,
-                                                req_traits, {})
-        self.assertEqual([], res)
-
-        # OK, now add the trait to one of the incl_* providers and verify that
-        # provider now shows up in our results
-        incl_biginv_noalloc.set_traits([avx2_t])
-        res = rp_obj._get_provider_ids_matching(self.ctx, resources,
-                                                req_traits, {})
-
-        rp_ids = [r[0] for r in res]
-        self.assertEqual([incl_biginv_noalloc.id], rp_ids)
-
-    def test_get_provider_ids_matching_with_multiple_forbidden(self):
-        rp1 = self._create_provider('rp1', uuids.agg1)
-        tb.add_inventory(rp1, fields.ResourceClass.VCPU, 64)
-
-        rp2 = self._create_provider('rp2', uuids.agg1)
-        trait_two, = tb.set_traits(rp2, 'CUSTOM_TWO')
-        tb.add_inventory(rp2, fields.ResourceClass.VCPU, 64)
-
-        rp3 = self._create_provider('rp3')
-        trait_three, = tb.set_traits(rp3, 'CUSTOM_THREE')
-        tb.add_inventory(rp3, fields.ResourceClass.VCPU, 64)
-
-        resources = {
-            fields.ResourceClass.STANDARD.index(fields.ResourceClass.VCPU): 4}
-        res = rp_obj._get_provider_ids_matching(
-            self.ctx, resources, {},
-            {trait_two.name: trait_two.id,
-             trait_three.name: trait_three.id}, member_of=[[uuids.agg1]])
-        self.assertEqual({(rp1.id, rp1.id)}, set(res))
-
-    def test_get_provider_ids_having_all_traits(self):
-        def run(traitnames, expected_ids):
-            tmap = {}
-            if traitnames:
-                tmap = rp_obj._trait_ids_from_names(self.ctx, traitnames)
-            obs = rp_obj._get_provider_ids_having_all_traits(self.ctx, tmap)
-            self.assertEqual(sorted(expected_ids), sorted(obs))
-
-        # No traits. This will never be returned, because it's illegal to
-        # invoke the method with no traits.
-        self._create_provider('cn1')
-
-        # One trait
-        cn2 = self._create_provider('cn2')
-        tb.set_traits(cn2, 'HW_CPU_X86_TBM')
-
-        # One the same as cn2
-        cn3 = self._create_provider('cn3')
-        tb.set_traits(cn3, 'HW_CPU_X86_TBM', 'HW_CPU_X86_TSX',
-                      'HW_CPU_X86_SGX')
-
-        # Disjoint
-        cn4 = self._create_provider('cn4')
-        tb.set_traits(cn4, 'HW_CPU_X86_SSE2', 'HW_CPU_X86_SSE3', 'CUSTOM_FOO')
-
-        # Request with no traits not allowed
-        self.assertRaises(
-            ValueError,
-            rp_obj._get_provider_ids_having_all_traits, self.ctx, None)
-        self.assertRaises(
-            ValueError,
-            rp_obj._get_provider_ids_having_all_traits, self.ctx, {})
-
-        # Common trait returns both RPs having it
-        run(['HW_CPU_X86_TBM'], [cn2.id, cn3.id])
-        # Just the one
-        run(['HW_CPU_X86_TSX'], [cn3.id])
-        run(['HW_CPU_X86_TSX', 'HW_CPU_X86_SGX'], [cn3.id])
-        run(['CUSTOM_FOO'], [cn4.id])
-        # Including the common one still just gets me cn3
-        run(['HW_CPU_X86_TBM', 'HW_CPU_X86_SGX'], [cn3.id])
-        run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'HW_CPU_X86_SGX'], [cn3.id])
-        # Can't be satisfied
-        run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'CUSTOM_FOO'], [])
-        run(['HW_CPU_X86_TBM', 'HW_CPU_X86_TSX', 'HW_CPU_X86_SGX',
-             'CUSTOM_FOO'], [])
-        run(['HW_CPU_X86_SGX', 'HW_CPU_X86_SSE3'], [])
-        run(['HW_CPU_X86_TBM', 'CUSTOM_FOO'], [])
-        run(['HW_CPU_X86_BMI'], [])
-        rp_obj.Trait(self.ctx, name='CUSTOM_BAR').create()
-        run(['CUSTOM_BAR'], [])
-
-
-class AllocationCandidatesTestCase(tb.PlacementDbBaseTestCase):
-    """Tests a variety of scenarios with both shared and non-shared resource
-    providers that the AllocationCandidates.get_by_requests() method returns a
-    set of alternative allocation requests and provider summaries that may be
-    used by the scheduler to sort/weigh the options it has for claiming
-    resources against providers.
- """ - - def setUp(self): - super(AllocationCandidatesTestCase, self).setUp() - self.requested_resources = { - fields.ResourceClass.VCPU: 1, - fields.ResourceClass.MEMORY_MB: 64, - fields.ResourceClass.DISK_GB: 1500, - } - # For debugging purposes, populated by _create_provider and used by - # _validate_allocation_requests to make failure results more readable. - self.rp_uuid_to_name = {} - - def _get_allocation_candidates(self, requests=None, limit=None): - if requests is None: - requests = {'': placement_lib.RequestGroup( - use_same_provider=False, - resources=self.requested_resources)} - return rp_obj.AllocationCandidates.get_by_requests(self.ctx, requests, - limit) - - def _validate_allocation_requests(self, expected, candidates): - """Assert correctness of allocation requests in allocation candidates. - - This is set up to make it easy for the caller to specify the expected - result, to make that expected structure readable for someone looking at - the test case, and to make test failures readable for debugging. - - :param expected: A list of lists of tuples representing the expected - allocation requests, of the form: - [ - [(resource_provider_name, resource_class_name, resource_count), - ..., - ], - ... - ] - :param candidates: The result from AllocationCandidates.get_by_requests - """ - # Extract/convert allocation requests from candidates - observed = [] - for ar in candidates.allocation_requests: - rrs = [] - for rr in ar.resource_requests: - rrs.append((self.rp_uuid_to_name[rr.resource_provider.uuid], - rr.resource_class, rr.amount)) - rrs.sort() - observed.append(rrs) - observed.sort() - - # Sort the guts of the expected structure - for rr in expected: - rr.sort() - expected.sort() - - # Now we ought to be able to compare them - self.assertEqual(expected, observed) - - def _validate_provider_summary_resources(self, expected, candidates): - """Assert correctness of the resources in provider summaries in - allocation candidates. - - This is set up to make it easy for the caller to specify the expected - result, to make that expected structure readable for someone looking at - the test case, and to make test failures readable for debugging. - - :param expected: A dict, keyed by resource provider name, of sets of - 3-tuples containing resource class, capacity, and - amount used: - { resource_provider_name: set([ - (resource_class, capacity, used), - ..., - ]), - ..., - } - :param candidates: The result from AllocationCandidates.get_by_requests - """ - observed = {} - for psum in candidates.provider_summaries: - rpname = self.rp_uuid_to_name[psum.resource_provider.uuid] - reslist = set() - for res in psum.resources: - reslist.add((res.resource_class, res.capacity, res.used)) - if rpname in observed: - self.fail("Found resource provider %s more than once in " - "provider_summaries!" % rpname) - observed[rpname] = reslist - - # Now we ought to be able to compare them - self.assertEqual(expected, observed) - - def _validate_provider_summary_traits(self, expected, candidates): - """Assert correctness of the traits in provider summaries in allocation - candidates. - - This is set up to make it easy for the caller to specify the expected - result, to make that expected structure readable for someone looking at - the test case, and to make test failures readable for debugging. - - :param expected: A dict, keyed by resource provider name, of sets of - string trait names: - { resource_provider_name: set([ - trait_name, ... 
-                           ]),
-                           ...,
-                         }
-        :param candidates: The result from AllocationCandidates.get_by_requests
-        """
-        observed = {}
-        for psum in candidates.provider_summaries:
-            rpname = self.rp_uuid_to_name[psum.resource_provider.uuid]
-            observed[rpname] = set(trait.name for trait in psum.traits)
-
-        self.assertEqual(expected, observed)
-
-    def test_unknown_traits(self):
-        missing = set(['UNKNOWN_TRAIT'])
-        requests = {'': placement_lib.RequestGroup(
-            use_same_provider=False, resources=self.requested_resources,
-            required_traits=missing)}
-        self.assertRaises(exception.TraitNotFound,
-                          rp_obj.AllocationCandidates.get_by_requests,
-                          self.ctx, requests)
-
-    def test_allc_req_and_prov_summary(self):
-        """Simply test with one resource provider that the allocation
-        requests returned by AllocationCandidates have valid
-        allocation_requests and provider_summaries.
-        """
-        cn1 = self._create_provider('cn1')
-        tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8)
-        tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
-        tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000)
-
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources={
-                    fields.ResourceClass.VCPU: 1
-                }
-            )}
-        )
-
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1)]
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 8, 0),
-                (fields.ResourceClass.MEMORY_MB, 2048, 0),
-                (fields.ResourceClass.DISK_GB, 2000, 0)
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-    def test_get_allc_req_old_records(self):
-        """Simulate an old resource provider record in the database that has no
-        root_provider_uuid set and ensure that we still get that candidate
-        returned.
-        """
-        # Passing a non-existing resource provider UUID should return an empty
-        # list
-        rp_tbl = rp_obj._RP_TBL
-        inv_tbl = rp_obj._INV_TBL
-        alloc_tbl = rp_obj._ALLOC_TBL
-        conn = self.placement_db.get_engine().connect()
-
-        # First, set up a record for an "old-style" resource provider with no
-        # root provider UUID.
-        ins_rptbl = rp_tbl.insert().values(
-            id=1,
-            uuid=uuids.rp1,
-            name='cn1',
-            root_provider_id=None,
-            parent_provider_id=None,
-            generation=42,
-        )
-        conn.execute(ins_rptbl)
-
-        # This is needed for _validate_allocation_requests() at the end
-        self.rp_uuid_to_name[uuids.rp1] = 'cn1'
-
-        # Add VCPU(resource_class_id=0) inventory to the provider.
-        ins_invtbl = inv_tbl.insert().values(
-            id=1,
-            resource_provider_id=1,
-            resource_class_id=0,
-            total=8,
-            reserved=0,
-            min_unit=1,
-            max_unit=8,
-            step_size=1,
-            allocation_ratio=1.0,
-        )
-        conn.execute(ins_invtbl)
-
-        # Consume VCPU inventory
-        ins_alloctbl = alloc_tbl.insert().values(
-            id=1,
-            resource_provider_id=1,
-            consumer_id=uuids.consumer,
-            resource_class_id=0,
-            used=4
-        )
-        conn.execute(ins_alloctbl)
-
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources={
-                    fields.ResourceClass.VCPU: 1
-                }
-            )}
-        )
-
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1)]
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 8, 4)
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # NOTE(tetsuro): Getting allocation candidates goes through a
-        # different path when sharing/nested providers exist, let's test
-        # that case and the path creating a new sharing provider.
-        # We omit the direct database insertion of 'ss1' here since 'cn1',
-        # which has no root id in the database, is the actual target of the
-        # following test.
-        ss1 = self._create_provider('ss1', uuids.agg1)
-        tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
-        tb.add_inventory(ss1, fields.ResourceClass.VCPU, 8)
-
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources={
-                    fields.ResourceClass.VCPU: 1
-                }
-            )}
-        )
-
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1)],
-            [('ss1', fields.ResourceClass.VCPU, 1)]
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 8, 4)
-            ]),
-            'ss1': set([
-                (fields.ResourceClass.VCPU, 8, 0)
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-    def test_all_local(self):
-        """Create some resource providers that can satisfy the request for
-        resources with local (non-shared) resources and verify that the
-        allocation requests returned by AllocationCandidates correspond with
-        each of these resource providers.
-        """
-        # Create three compute node providers with VCPU, RAM and local disk
-        cn1, cn2, cn3 = (self._create_provider(name)
-                         for name in ('cn1', 'cn2', 'cn3'))
-        for cn in (cn1, cn2, cn3):
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768,
-                             min_unit=64, step_size=64, allocation_ratio=1.5)
-            total_gb = 1000 if cn.name == 'cn3' else 2000
-            tb.add_inventory(cn, fields.ResourceClass.DISK_GB, total_gb,
-                             reserved=100, min_unit=10, step_size=10,
-                             allocation_ratio=1.0)
-
-        # Ask for the alternative placement possibilities and verify each
-        # provider is returned
-        alloc_cands = self._get_allocation_candidates()
-
-        # Verify the provider summary information indicates 0 usage and
-        # capacity calculated from above inventory numbers for the first two
-        # compute nodes. The third doesn't show up because it lacks sufficient
-        # disk capacity.
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0),
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0),
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # Verify the allocation requests that are returned. There should be 2
-        # allocation requests, one for each compute node, containing 3
-        # resources in each allocation request, one each for VCPU, RAM, and
-        # disk. The amounts of the requests should correspond to the requested
-        # resource amounts in the filter:resources dict passed to
-        # AllocationCandidates.get_by_requests().
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn1', fields.ResourceClass.DISK_GB, 1500)],
-            [('cn2', fields.ResourceClass.VCPU, 1),
-             ('cn2', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn2', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        # Now let's add traits into the mix. Currently, none of the compute
-        # nodes has the AVX2 trait associated with it, so we should get 0
-        # results if we required AVX2
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2])
-            )},
-        )
-        self._validate_allocation_requests([], alloc_cands)
-
-        # If we then associate the AVX2 trait to just compute node 2, we should
-        # get back just that compute node in the provider summaries
-        tb.set_traits(cn2, 'HW_CPU_X86_AVX2')
-
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2])
-            )},
-        )
-        # Only cn2 should be in our allocation requests now since that's the
-        # only one with the required trait
-        expected = [
-            [('cn2', fields.ResourceClass.VCPU, 1),
-             ('cn2', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn2', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-        p_sums = alloc_cands.provider_summaries
-        self.assertEqual(1, len(p_sums))
-
-        expected = {
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 32768 * 1.5, 0),
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        expected = {
-            'cn2': set(['HW_CPU_X86_AVX2'])
-        }
-        self._validate_provider_summary_traits(expected, alloc_cands)
-
-        # Confirm that forbidden traits changes the results to get cn1.
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                forbidden_traits=set([os_traits.HW_CPU_X86_AVX2])
-            )},
-        )
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn1', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-    def test_all_local_limit(self):
-        """Create some resource providers that can satisfy the request for
-        resources with local (non-shared) resources, limit them, and verify
-        that the allocation requests returned by AllocationCandidates
-        correspond with each of these resource providers.
-        """
-        # Create three compute node providers with VCPU, RAM and local disk
-        for name in ('cn1', 'cn2', 'cn3'):
-            cn = self._create_provider(name)
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768,
-                             min_unit=64, step_size=64, allocation_ratio=1.5)
-            total_gb = 1000 if name == 'cn3' else 2000
-            tb.add_inventory(cn, fields.ResourceClass.DISK_GB, total_gb,
-                             reserved=100, min_unit=10, step_size=10,
-                             allocation_ratio=1.0)
-
-        # Ask for just one candidate.
-        limit = 1
-        alloc_cands = self._get_allocation_candidates(limit=limit)
-        allocation_requests = alloc_cands.allocation_requests
-        self.assertEqual(limit, len(allocation_requests))
-
-        # provider summaries should have only one rp
-        self.assertEqual(limit, len(alloc_cands.provider_summaries))
-
-        # Do it again, with conf set to randomize. We can't confirm the
-        # random-ness but we can be sure the code path doesn't explode.
-        CONF.set_override('randomize_allocation_candidates', True,
-                          group='placement')
-
-        # Ask for two candidates.
-        limit = 2
-        alloc_cands = self._get_allocation_candidates(limit=limit)
-        allocation_requests = alloc_cands.allocation_requests
-        self.assertEqual(limit, len(allocation_requests))
-
-        # provider summaries should have two rps
-        self.assertEqual(limit, len(alloc_cands.provider_summaries))
-
-        # Do it again, asking for more than are available.
-        limit = 5
-        # We still only expect 2 because cn3 does not match default requests.
-        expected_length = 2
-        alloc_cands = self._get_allocation_candidates(limit=limit)
-        allocation_requests = alloc_cands.allocation_requests
-        self.assertEqual(expected_length, len(allocation_requests))
-
-        # provider summaries should have two rps
-        self.assertEqual(expected_length, len(alloc_cands.provider_summaries))
-
-    def test_local_with_shared_disk(self):
-        """Create some resource providers that can satisfy the request for
-        resources with local VCPU and MEMORY_MB but rely on a shared storage
-        pool to satisfy DISK_GB and verify that the allocation requests
-        returned by AllocationCandidates have DISK_GB served up by the shared
-        storage pool resource provider and VCPU/MEMORY_MB by the compute node
-        providers
-        """
-        # Create two compute node providers with VCPU, RAM and NO local disk,
-        # associated with the aggregate.
-        cn1, cn2 = (self._create_provider(name, uuids.agg)
-                    for name in ('cn1', 'cn2'))
-        for cn in (cn1, cn2):
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
-                             min_unit=64, allocation_ratio=1.5)
-
-        # Create the shared storage pool, asociated with the same aggregate
-        ss = self._create_provider('shared storage', uuids.agg)
-
-        # Give the shared storage pool some inventory of DISK_GB
-        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, reserved=100,
-                         min_unit=10)
-
-        # Mark the shared storage pool as having inventory shared among any
-        # provider associated via aggregate
-        tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
-
-        # Ask for the alternative placement possibilities and verify each
-        # compute node provider is listed in the allocation requests as well as
-        # the shared storage pool provider
-        alloc_cands = self._get_allocation_candidates()
-
-        # Verify the provider summary information indicates 0 usage and
-        # capacity calculated from above inventory numbers for both compute
-        # nodes and the shared provider.
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'shared storage': set([
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0)
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # Verify the allocation requests that are returned. There should be 2
-        # allocation requests, one for each compute node, containing 3
-        # resources in each allocation request, one each for VCPU, RAM, and
-        # disk. The amounts of the requests should correspond to the requested
-        # resource amounts in the filter:resources dict passed to
-        # AllocationCandidates.get_by_requests(). The providers for VCPU and
-        # MEMORY_MB should be the compute nodes while the provider for the
-        # DISK_GB should be the shared storage pool
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
-            [('cn2', fields.ResourceClass.VCPU, 1),
-             ('cn2', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        # Test for bug #1705071. We query for allocation candidates with a
-        # request for ONLY the DISK_GB (the resource that is shared with
-        # compute nodes) and no VCPU/MEMORY_MB. Before the fix for bug
-        # #1705071, this resulted in a KeyError
-
-        alloc_cands = self._get_allocation_candidates(
-            requests={'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources={
-                    'DISK_GB': 10,
-                }
-            )}
-        )
-
-        # We should only have provider summary information for the sharing
-        # storage provider, since that's the only provider that can be
-        # allocated against for this request.  In the future, we may look into
-        # returning the shared-with providers in the provider summaries, but
-        # that's a distant possibility.
-        expected = {
-            'shared storage': set([
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # The allocation_requests will only include the shared storage
-        # provider because the only thing we're requesting to allocate is
-        # against the provider of DISK_GB, which happens to be the shared
-        # storage provider.
-        expected = [[('shared storage', fields.ResourceClass.DISK_GB, 10)]]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        # Now we're going to add a set of required traits into the request mix.
-        # To start off, let's request a required trait that we know has not
-        # been associated yet with any provider, and ensure we get no results
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2]),
-            )}
-        )
-
-        # We have not yet associated the AVX2 trait to any provider, so we
-        # should get zero allocation candidates
-        p_sums = alloc_cands.provider_summaries
-        self.assertEqual(0, len(p_sums))
-
-        # Now, if we then associate the required trait with both of our compute
-        # nodes, we should get back both compute nodes since they both now
-        # satisfy the required traits as well as the resource request
-        avx2_t = rp_obj.Trait.get_by_name(self.ctx, os_traits.HW_CPU_X86_AVX2)
-        cn1.set_traits([avx2_t])
-        cn2.set_traits([avx2_t])
-
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2]),
-            )}
-        )
-
-        # There should be 2 compute node providers and 1 shared storage
-        # provider in the summaries.
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'shared storage': set([
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0)
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # Let's check that the traits listed for the compute nodes include the
-        # AVX2 trait, and the shared storage provider in the provider summaries
-        # does NOT have the AVX2 trait.
-        expected = {
-            'cn1': set(['HW_CPU_X86_AVX2']),
-            'cn2': set(['HW_CPU_X86_AVX2']),
-            'shared storage': set(['MISC_SHARES_VIA_AGGREGATE']),
-        }
-        self._validate_provider_summary_traits(expected, alloc_cands)
-
-        # Forbid the AVX2 trait
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                forbidden_traits=set([os_traits.HW_CPU_X86_AVX2]),
-            )}
-        )
-        # Should be no results as both cn1 and cn2 have the trait.
-        expected = []
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        # Require the AVX2 trait but forbid CUSTOM_EXTRA_FASTER, which is
-        # added to cn2
-        tb.set_traits(cn2, 'CUSTOM_EXTRA_FASTER')
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2]),
-                forbidden_traits=set(['CUSTOM_EXTRA_FASTER']),
-            )}
-        )
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        # Add disk to cn1, forbid sharing, and require the AVX2 trait.
-        # This should result in getting only cn1.
-        tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2048,
-                         allocation_ratio=1.5)
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2]),
-                forbidden_traits=set(['MISC_SHARES_VIA_AGGREGATE']),
-            )}
-        )
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn1', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-    def test_local_with_shared_custom_resource(self):
-        """Create some resource providers that can satisfy the request for
-        resources with local VCPU and MEMORY_MB but rely on a shared resource
-        provider to satisfy a custom resource requirement and verify that the
-        allocation requests returned by AllocationCandidates have the custom
-        resource served up by the shared custom resource provider and
-        VCPU/MEMORY_MB by the compute node providers
-        """
-        # The aggregate that will be associated to everything...
-        agg_uuid = uuids.agg
-
-        # Create two compute node providers with VCPU, RAM and NO local
-        # CUSTOM_MAGIC resources, associated with the aggregate.
-        for name in ('cn1', 'cn2'):
-            cn = self._create_provider(name, agg_uuid)
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
-                             min_unit=64, allocation_ratio=1.5)
-
-        # Create a custom resource called MAGIC
-        magic_rc = rp_obj.ResourceClass(
-            self.ctx,
-            name='CUSTOM_MAGIC',
-        )
-        magic_rc.create()
-
-        # Create the shared provider that serves CUSTOM_MAGIC, associated with
-        # the same aggregate
-        magic_p = self._create_provider('shared custom resource provider',
-                                        agg_uuid)
-        tb.add_inventory(magic_p, magic_rc.name, 2048, reserved=1024,
-                         min_unit=10)
-
-        # Mark the magic provider as having inventory shared among any provider
-        # associated via aggregate
-        tb.set_traits(magic_p, "MISC_SHARES_VIA_AGGREGATE")
-
-        # The resources we will request
-        requested_resources = {
-            fields.ResourceClass.VCPU: 1,
-            fields.ResourceClass.MEMORY_MB: 64,
-            magic_rc.name: 512,
-        }
-
-        alloc_cands = self._get_allocation_candidates(
-            requests={'': placement_lib.RequestGroup(
-                use_same_provider=False, resources=requested_resources)})
-
-        # Verify the allocation requests that are returned. There should be 2
-        # allocation requests, one for each compute node, containing 3
-        # resources in each allocation request, one each for VCPU, RAM, and
-        # MAGIC. The amounts of the requests should correspond to the requested
-        # resource amounts in the filter:resources dict passed to
-        # AllocationCandidates.get_by_requests(). The providers for VCPU and
-        # MEMORY_MB should be the compute nodes while the provider for the
-        # MAGIC should be the shared custom resource provider.
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared custom resource provider', magic_rc.name, 512)],
-            [('cn2', fields.ResourceClass.VCPU, 1),
-             ('cn2', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared custom resource provider', magic_rc.name, 512)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'shared custom resource provider': set([
-                (magic_rc.name, 1024, 0)
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-    def test_mix_local_and_shared(self):
-        # Create three compute node providers with VCPU and RAM, but only
-        # the third compute node has DISK. The first two computes will
-        # share the storage from the shared storage pool.
-        cn1, cn2 = (self._create_provider(name, uuids.agg)
-                    for name in ('cn1', 'cn2'))
-        # cn3 is not associated with the aggregate
-        cn3 = self._create_provider('cn3')
-        for cn in (cn1, cn2, cn3):
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
-                             min_unit=64, allocation_ratio=1.5)
-        # Only cn3 has disk
-        tb.add_inventory(cn3, fields.ResourceClass.DISK_GB, 2000,
-                         reserved=100, min_unit=10)
-
-        # Create the shared storage pool in the same aggregate as the first two
-        # compute nodes
-        ss = self._create_provider('shared storage', uuids.agg)
-
-        # Give the shared storage pool some inventory of DISK_GB
-        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, reserved=100,
-                         min_unit=10)
-
-        tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
-
-        alloc_cands = self._get_allocation_candidates()
-
-        # Expect cn1, cn2, cn3 and ss in the summaries
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn3': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-            'shared storage': set([
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # Expect three allocation requests: (cn1, ss), (cn2, ss), (cn3)
-        expected = [
-            [('cn1', fields.ResourceClass.VCPU, 1),
-             ('cn1', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
-            [('cn2', fields.ResourceClass.VCPU, 1),
-             ('cn2', fields.ResourceClass.MEMORY_MB, 64),
-             ('shared storage', fields.ResourceClass.DISK_GB, 1500)],
-            [('cn3', fields.ResourceClass.VCPU, 1),
-             ('cn3', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn3', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        # Now we're going to add a set of required traits into the request mix.
-        # To start off, let's request a required trait that we know has not
-        # been associated yet with any provider, and ensure we get no results
-        alloc_cands = self._get_allocation_candidates(
-            {'': placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2]),
-            )}
-        )
-
-        # We have not yet associated the AVX2 trait to any provider, so we
-        # should get zero allocation candidates
-        p_sums = alloc_cands.provider_summaries
-        self.assertEqual(0, len(p_sums))
-        a_reqs = alloc_cands.allocation_requests
-        self.assertEqual(0, len(a_reqs))
-
-        # Now, if we then associate the required trait with all of our compute
-        # nodes, we should get back all compute nodes since they all now
-        # satisfy the required traits as well as the resource request
-        for cn in (cn1, cn2, cn3):
-            tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2)
-
-        alloc_cands = self._get_allocation_candidates(requests={'':
-            placement_lib.RequestGroup(
-                use_same_provider=False,
-                resources=self.requested_resources,
-                required_traits=set([os_traits.HW_CPU_X86_AVX2]),
-            )}
-        )
-
-        # There should be 3 compute node providers and 1 shared storage
-        # provider in the summaries.
-        expected = {
-            'cn1': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn2': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-            ]),
-            'cn3': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-            'shared storage': set([
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        # Let's check that the traits listed for the compute nodes include the
-        # AVX2 trait, and the shared storage provider in the provider summaries
-        # does NOT have the AVX2 trait
-        expected = {
-            'cn1': set(['HW_CPU_X86_AVX2']),
-            'cn2': set(['HW_CPU_X86_AVX2']),
-            'cn3': set(['HW_CPU_X86_AVX2']),
-            'shared storage': set(['MISC_SHARES_VIA_AGGREGATE']),
-        }
-        self._validate_provider_summary_traits(expected, alloc_cands)
-
-        # Now, let's add a new wrinkle to the equation and add a required trait
-        # that will ONLY be satisfied by a compute node with local disk that
-        # has SSD drives. Set this trait only on the compute node with local
-        # disk (cn3)
-        tb.set_traits(cn3, os_traits.HW_CPU_X86_AVX2,
-                      os_traits.STORAGE_DISK_SSD)
-
-        alloc_cands = self._get_allocation_candidates(
-            {'':
-                placement_lib.RequestGroup(
-                    use_same_provider=False,
-                    resources=self.requested_resources,
-                    required_traits=set([
-                        os_traits.HW_CPU_X86_AVX2, os_traits.STORAGE_DISK_SSD
-                    ]),
-                )}
-        )
-
-        # There should be only cn3 in the returned allocation candidates
-        expected = [
-            [('cn3', fields.ResourceClass.VCPU, 1),
-             ('cn3', fields.ResourceClass.MEMORY_MB, 64),
-             ('cn3', fields.ResourceClass.DISK_GB, 1500)],
-        ]
-        self._validate_allocation_requests(expected, alloc_cands)
-
-        expected = {
-            'cn3': set([
-                (fields.ResourceClass.VCPU, 24 * 16.0, 0),
-                (fields.ResourceClass.MEMORY_MB, 1024 * 1.5, 0),
-                (fields.ResourceClass.DISK_GB, 2000 - 100, 0),
-            ]),
-        }
-        self._validate_provider_summary_resources(expected, alloc_cands)
-
-        expected = {
-            'cn3': set(['HW_CPU_X86_AVX2', 'STORAGE_DISK_SSD'])
-        }
-        self._validate_provider_summary_traits(expected, alloc_cands)
-
-    def test_common_rc(self):
-        """Candidates when cn and shared have inventory in the same class."""
-        cn = self._create_provider('cn', uuids.agg1)
-        tb.add_inventory(cn, fields.ResourceClass.VCPU, 24)
-        tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 2048)
-        tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 1600)
-
-        ss = self._create_provider('ss', uuids.agg1)
-        tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE")
-        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000)
-
-        alloc_cands = self._get_allocation_candidates()
-
-        # One allocation_request should have cn + ss; the other should have
-        # just the cn.
- expected = [ - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('cn', fields.ResourceClass.DISK_GB, 1500)], - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('ss', fields.ResourceClass.DISK_GB, 1500)], - ] - - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - # Next let's increase the requested DISK_GB - requested_resources = { - fields.ResourceClass.VCPU: 1, - fields.ResourceClass.MEMORY_MB: 64, - fields.ResourceClass.DISK_GB: 1800, - } - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources=requested_resources, - )} - ) - - expected = [ - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('ss', fields.ResourceClass.DISK_GB, 1800)], - ] - - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_common_rc_traits_split(self): - """Validate filters when traits are split across cn and shared RPs.""" - # NOTE(efried): This test case only applies to the scenario where we're - # requesting resources via the RequestGroup where - # use_same_provider=False - - cn = self._create_provider('cn', uuids.agg1) - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 2048) - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 1600) - # The compute node's disk is SSD - tb.set_traits(cn, 'HW_CPU_X86_SSE', 'STORAGE_DISK_SSD') - - ss = self._create_provider('ss', uuids.agg1) - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 1600) - # The shared storage's disk is RAID - tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_RAID') - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources=self.requested_resources, - required_traits=set(['HW_CPU_X86_SSE', 'STORAGE_DISK_SSD', - 'CUSTOM_RAID']) - )} - ) - - # TODO(efried): Bug #1724633: we'd *like* to get no candidates, because - # there's no single DISK_GB resource with both STORAGE_DISK_SSD and - # CUSTOM_RAID traits. 
- # expected = [] - expected = [ - [('cn', fields.ResourceClass.VCPU, 1), - ('cn', fields.ResourceClass.MEMORY_MB, 64), - ('ss', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - # expected = {} - expected = { - 'cn': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_only_one_sharing_provider(self): - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16) - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'IPV4_ADDRESS': 2, - 'SRIOV_NET_VF': 1, - 'DISK_GB': 1500, - } - )} - ) - - expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)] - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_all_sharing_providers_no_rc_overlap(self): - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24) - - ss2 = self._create_provider('ss2', uuids.agg1) - tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'IPV4_ADDRESS': 2, - 'DISK_GB': 1500, - } - )} - ) - - expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - ]), - 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_all_sharing_providers_no_rc_overlap_more_classes(self): - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16) - - ss2 = self._create_provider('ss2', uuids.agg1) - tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'IPV4_ADDRESS': 2, - 'SRIOV_NET_VF': 1, - 'DISK_GB': 1500, - } - )} - ) - - expected = [ - [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss2', fields.ResourceClass.DISK_GB, 1500)] - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'ss1': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0) - ]), - 'ss2': set([ - 
(fields.ResourceClass.DISK_GB, 1600, 0),
- ]),
- }
- self._validate_provider_summary_resources(expected, alloc_cands)
-
- def test_all_sharing_providers(self):
- ss1 = self._create_provider('ss1', uuids.agg1)
- tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
- tb.add_inventory(ss1, fields.ResourceClass.IPV4_ADDRESS, 24)
- tb.add_inventory(ss1, fields.ResourceClass.SRIOV_NET_VF, 16)
- tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
-
- ss2 = self._create_provider('ss2', uuids.agg1)
- tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE")
- tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16)
- tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600)
-
- alloc_cands = self._get_allocation_candidates(requests={
- '': placement_lib.RequestGroup(
- use_same_provider=False,
- resources={
- 'IPV4_ADDRESS': 2,
- 'SRIOV_NET_VF': 1,
- 'DISK_GB': 1500,
- }
- )}
- )
-
- # We expect four candidates:
- # - gets all the resources from ss1,
- # - gets the SRIOV_NET_VF from ss2 and the rest from ss1,
- # - gets the DISK_GB from ss2 and the rest from ss1,
- # - gets SRIOV_NET_VF and DISK_GB from ss2 and the rest from ss1
- expected = [
- [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
- ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1),
- ('ss1', fields.ResourceClass.DISK_GB, 1500)],
- [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
- ('ss1', fields.ResourceClass.SRIOV_NET_VF, 1),
- ('ss2', fields.ResourceClass.DISK_GB, 1500)],
- [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
- ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1),
- ('ss1', fields.ResourceClass.DISK_GB, 1500)],
- [('ss1', fields.ResourceClass.IPV4_ADDRESS, 2),
- ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1),
- ('ss2', fields.ResourceClass.DISK_GB, 1500)],
- ]
- self._validate_allocation_requests(expected, alloc_cands)
-
- expected = {
- 'ss1': set([
- (fields.ResourceClass.IPV4_ADDRESS, 24, 0),
- (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
- (fields.ResourceClass.DISK_GB, 1600, 0)
- ]),
- 'ss2': set([
- (fields.ResourceClass.SRIOV_NET_VF, 16, 0),
- (fields.ResourceClass.DISK_GB, 1600, 0),
- ]),
- }
- self._validate_provider_summary_resources(expected, alloc_cands)
-
- def test_two_non_sharing_connect_to_one_sharing_different_aggregate(self):
- # Covering the following setup:
- #
- # CN1 (VCPU) CN2 (VCPU)
- # \ agg1 / agg2
- # SS1 (DISK_GB)
- #
- # It is different from test_mix_local_and_shared as it uses two
- # different aggregates to connect the two CNs to the shared RP
- cn1 = self._create_provider('cn1', uuids.agg1)
- tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24)
- tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048)
-
- cn2 = self._create_provider('cn2', uuids.agg2)
- tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24)
- tb.add_inventory(cn2, fields.ResourceClass.MEMORY_MB, 2048)
-
- ss1 = self._create_provider('ss1', uuids.agg1, uuids.agg2)
- tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE")
- tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600)
-
- alloc_cands = self._get_allocation_candidates(
- {'': placement_lib.RequestGroup(
- use_same_provider=False,
- resources={
- 'VCPU': 2,
- 'DISK_GB': 1500,
- }
- )}
- )
- expected = [
- [('cn1', fields.ResourceClass.VCPU, 2),
- ('ss1', fields.ResourceClass.DISK_GB, 1500)],
- [('cn2', fields.ResourceClass.VCPU, 2),
- ('ss1', fields.ResourceClass.DISK_GB, 1500)],
- ]
- self._validate_allocation_requests(expected, alloc_cands)
-
- expected = {
- 'cn1': set([
- (fields.ResourceClass.VCPU, 24, 0),
- (fields.ResourceClass.MEMORY_MB, 2048, 0),
- ]),
- 'cn2': set([
- 
(fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_two_non_sharing_one_common_and_two_unique_sharing(self): - # Covering the following setup: - # - # CN1 (VCPU) CN2 (VCPU) - # / agg3 \ agg1 / agg1 \ agg2 - # SS3 (IPV4) SS1 (DISK_GB) SS2 (IPV4) - cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg3) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - - cn2 = self._create_provider('cn2', uuids.agg1, uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn2, fields.ResourceClass.MEMORY_MB, 2048) - - # ss1 is connected to both cn1 and cn2 - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) - - # ss2 only connected to cn2 - ss2 = self._create_provider('ss2', uuids.agg2) - tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24) - - # ss3 only connected to cn1 - ss3 = self._create_provider('ss3', uuids.agg3) - tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss3, fields.ResourceClass.IPV4_ADDRESS, 24) - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'DISK_GB': 1500, - 'IPV4_ADDRESS': 2, - } - )} - ) - - expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss3', fields.ResourceClass.IPV4_ADDRESS, 2)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss2': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - ]), - 'ss3': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_rc_not_split_between_sharing_and_non_sharing(self): - # cn1(VCPU,MEM) Non-sharing RP with some of the resources - # | agg1 aggregated with - # ss1(DISK) sharing RP that has the rest of the resources - # - # cn2(VCPU) Non-sharing with one of the resources; - # / agg2 \ aggregated with multiple sharing providers - # ss2_1(MEM) ss2_2(DISK) with different resources. 
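Stated outside the fixture plumbing, the invariant this test encodes can be sketched as follows (illustrative types only, not placement's real code):

```python
# Within one allocation request, a single resource class is never split
# across a non-sharing and a sharing provider: each class is satisfied
# whole by exactly one provider.
def rc_never_split(allocation_request):
    """allocation_request: iterable of (provider, resource_class, amount)."""
    provider_by_rc = {}
    for provider, rc, _amount in allocation_request:
        if provider_by_rc.setdefault(rc, provider) != provider:
            return False
    return True

assert rc_never_split(
    [('cn1', 'VCPU', 1), ('cn1', 'MEMORY_MB', 64), ('ss1', 'DISK_GB', 1500)])
```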
- - cn1 = self._create_provider('cn1', uuids.agg1) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - ss1 = self._create_provider('ss1', uuids.agg1) - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 2000) - tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE') - - cn2 = self._create_provider('cn2', uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - ss2_1 = self._create_provider('ss2_1', uuids.agg2) - tb.add_inventory(ss2_1, fields.ResourceClass.MEMORY_MB, 2048) - tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE') - ss2_2 = self._create_provider('ss2_2', uuids.agg2) - tb.add_inventory(ss2_2, fields.ResourceClass.DISK_GB, 2000) - tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE') - - alloc_cands = self._get_allocation_candidates() - expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('ss2_1', fields.ResourceClass.MEMORY_MB, 64), - ('ss2_2', fields.ResourceClass.DISK_GB, 1500)], - ] - - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - ]), - 'ss2_1': set([ - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss2_2': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_multiple_sharing_providers_with_same_rc(self): - # cn1(VCPU,MEM) Non-sharing with some of the resources; - # / agg1 \ aggregated with multiple sharing providers - # ss1_1(DISK) ss1_2(DISK) with the same resource. - # - # cn2(VCPU) Non-sharing with one of the resources; - # / agg2 \ aggregated with multiple sharing providers - # ss2_1(MEM) ss2_2(DISK) with different resources. 
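The expansion this setup should produce can be previewed with a small sketch (provider names from the diagram; the enumeration logic is illustrative):

```python
# With ss1_1 and ss1_2 both offering DISK_GB in cn1's aggregate, one
# candidate is emitted per eligible DISK_GB provider, so cn1 yields two.
disk_providers = ['ss1_1', 'ss1_2']
candidates = [[('cn1', 'VCPU', 1), ('cn1', 'MEMORY_MB', 64),
               (disk, 'DISK_GB', 1500)] for disk in disk_providers]
assert len(candidates) == 2
```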
- - cn1 = self._create_provider('cn1', uuids.agg1) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - ss1_1 = self._create_provider('ss1_1', uuids.agg1) - tb.add_inventory(ss1_1, fields.ResourceClass.DISK_GB, 2000) - tb.set_traits(ss1_1, 'MISC_SHARES_VIA_AGGREGATE') - ss1_2 = self._create_provider('ss1_2', uuids.agg1) - tb.add_inventory(ss1_2, fields.ResourceClass.DISK_GB, 2000) - tb.set_traits(ss1_2, 'MISC_SHARES_VIA_AGGREGATE') - - cn2 = self._create_provider('cn2', uuids.agg2) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - ss2_1 = self._create_provider('ss2_1', uuids.agg2) - tb.add_inventory(ss2_1, fields.ResourceClass.MEMORY_MB, 2048) - tb.set_traits(ss2_1, 'MISC_SHARES_VIA_AGGREGATE') - ss2_2 = self._create_provider('ss2_2', uuids.agg2) - tb.add_inventory(ss2_2, fields.ResourceClass.DISK_GB, 2000) - tb.set_traits(ss2_2, 'MISC_SHARES_VIA_AGGREGATE') - - alloc_cands = self._get_allocation_candidates() - expected = [ - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('ss1_1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 1), - ('cn1', fields.ResourceClass.MEMORY_MB, 64), - ('ss1_2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 1), - ('ss2_1', fields.ResourceClass.MEMORY_MB, 64), - ('ss2_2', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss1_1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - 'ss1_2': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - ]), - 'ss2_1': set([ - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss2_2': set([ - (fields.ResourceClass.DISK_GB, 2000, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_sharing_providers_member_of(self): - # Covering the following setup: - # - # CN1 (VCPU, DISK_GB) CN2 (VCPU, DISK_GB) - # / agg1 \ agg2 / agg2 \ agg3 - # SS1 (DISK_GB) SS2 (DISK_GB) SS3 (DISK_GB) - cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 1600) - - cn2 = self._create_provider('cn2', uuids.agg2, uuids.agg3) - tb.add_inventory(cn2, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn2, fields.ResourceClass.DISK_GB, 1600) - - # ss1 is connected to cn1 - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) - - # ss2 is connected to both cn1 and cn2 - ss2 = self._create_provider('ss2', uuids.agg2) - tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1600) - - # ss3 is connected to cn2 - ss3 = self._create_provider('ss3', uuids.agg3) - tb.set_traits(ss3, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss3, fields.ResourceClass.DISK_GB, 1600) - - # Let's get allocation candidates from agg1 - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'DISK_GB': 1500, - }, - member_of=[[uuids.agg1]] - )} - ) - - expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - 
('ss1', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - # Let's get allocation candidates from agg2 - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'DISK_GB': 1500, - }, - member_of=[[uuids.agg2]] - )} - ) - - expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('cn2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - # Let's move to validate multiple member_of scenario - # The request from agg1 *AND* agg2 would provide only - # resources from cn1 with its local DISK - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'DISK_GB': 1500, - }, - member_of=[[uuids.agg1], [uuids.agg2]] - )} - ) - - expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - # The request from agg1 *OR* agg2 would provide five candidates - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'DISK_GB': 1500, - }, - member_of=[[uuids.agg1, uuids.agg2]] - )} - ) - - expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500)], - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('cn2', fields.ResourceClass.DISK_GB, 1500)], - [('cn2', fields.ResourceClass.VCPU, 2), - ('ss2', fields.ResourceClass.DISK_GB, 1500)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'cn2': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss2': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_two_sharing_indirectly_connected_connecting_not_give_resource( - self): - # This covers the following setup - # CN1 (VCPU, MEMORY_MB) - # / \ - # /agg1 \agg2 - # / \ - # SS1 ( 
SS2 ( - # DISK_GB) IPV4_ADDRESS - # SRIOV_NET_VF) - # The request then made for resources from the sharing RPs only - - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) - - cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - - ss2 = self._create_provider('ss2', uuids.agg2) - tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16) - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'IPV4_ADDRESS': 2, - 'SRIOV_NET_VF': 1, - 'DISK_GB': 1500, - } - )} - ) - - expected = [ - [('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss2': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_two_sharing_indirectly_connected_connecting_gives_resource(self): - # This covers the following setup - # CN1 (VCPU, MEMORY_MB) - # / \ - # /agg1 \agg2 - # / \ - # SS1 ( SS2 ( - # DISK_GB) IPV4_ADDRESS - # SRIOV_NET_VF) - # The request then made for resources from all three RPs - - ss1 = self._create_provider('ss1', uuids.agg1) - tb.set_traits(ss1, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 1600) - - cn1 = self._create_provider('cn1', uuids.agg1, uuids.agg2) - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 24) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - - ss2 = self._create_provider('ss2', uuids.agg2) - tb.set_traits(ss2, "MISC_SHARES_VIA_AGGREGATE") - tb.add_inventory(ss2, fields.ResourceClass.IPV4_ADDRESS, 24) - tb.add_inventory(ss2, fields.ResourceClass.SRIOV_NET_VF, 16) - - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'IPV4_ADDRESS': 2, - 'SRIOV_NET_VF': 1, - 'DISK_GB': 1500, - } - )} - ) - - expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('ss1', fields.ResourceClass.DISK_GB, 1500), - ('ss2', fields.ResourceClass.IPV4_ADDRESS, 2), - ('ss2', fields.ResourceClass.SRIOV_NET_VF, 1)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 24, 0), - (fields.ResourceClass.MEMORY_MB, 2048, 0), - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 1600, 0), - ]), - 'ss2': set([ - (fields.ResourceClass.IPV4_ADDRESS, 24, 0), - (fields.ResourceClass.SRIOV_NET_VF, 16, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_simple_tree_of_providers(self): - """Tests that we properly winnow allocation requests when including - traits in the request group and that the traits appear in the provider - summaries of the returned allocation candidates - """ - # We are setting up a single tree that looks like this: - # - # compute node (cn) - # / \ - # / \ - # numa cell 0 numa cell 1 - # | | - # | | - # pf 0 pf 1 - # - # The second physical function will be associated with 
the
- # HW_NIC_OFFLOAD_GENEVE trait, but not the first physical function.
- #
- # We will issue a request to _get_allocation_candidates() for VCPU,
- # MEMORY_MB and SRIOV_NET_VF **without** required traits, then include
- # a request that includes HW_NIC_OFFLOAD_GENEVE. In the latter case,
- # the compute node tree should be returned but the allocation requests
- # should only include the second physical function since the required
- # trait is only associated with that PF.
- #
- # Subsequently, we will consume all the SRIOV_NET_VF resources from the
- # second PF's inventory and attempt the same request of resources and
- # HW_NIC_OFFLOAD_GENEVE. We should get 0 returned results because now
- # the only PF that has the required trait has no inventory left.
- cn = self._create_provider('cn')
-
- tb.add_inventory(cn, fields.ResourceClass.VCPU, 16)
- tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768)
-
- numa_cell0 = self._create_provider('cn_numa0', parent=cn.uuid)
- numa_cell1 = self._create_provider('cn_numa1', parent=cn.uuid)
-
- pf0 = self._create_provider('cn_numa0_pf0', parent=numa_cell0.uuid)
- tb.add_inventory(pf0, fields.ResourceClass.SRIOV_NET_VF, 8)
- pf1 = self._create_provider('cn_numa1_pf1', parent=numa_cell1.uuid)
- tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
- tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE)
-
- alloc_cands = self._get_allocation_candidates(
- {'': placement_lib.RequestGroup(
- use_same_provider=False,
- resources={
- fields.ResourceClass.VCPU: 2,
- fields.ResourceClass.MEMORY_MB: 256,
- fields.ResourceClass.SRIOV_NET_VF: 1,
- }
- )}
- )
-
- expected = [
- [('cn', fields.ResourceClass.VCPU, 2),
- ('cn', fields.ResourceClass.MEMORY_MB, 256),
- ('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)],
- [('cn', fields.ResourceClass.VCPU, 2),
- ('cn', fields.ResourceClass.MEMORY_MB, 256),
- ('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)],
- ]
- self._validate_allocation_requests(expected, alloc_cands)
-
- expected = {
- 'cn': set([
- (fields.ResourceClass.VCPU, 16, 0),
- (fields.ResourceClass.MEMORY_MB, 32768, 0),
- ]),
- 'cn_numa0': set([]),
- 'cn_numa1': set([]),
- 'cn_numa0_pf0': set([
- (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
- ]),
- 'cn_numa1_pf1': set([
- (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
- ]),
- }
- self._validate_provider_summary_resources(expected, alloc_cands)
-
- expected = {
- 'cn': set([]),
- 'cn_numa0': set([]),
- 'cn_numa1': set([]),
- 'cn_numa0_pf0': set([]),
- 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
- }
- self._validate_provider_summary_traits(expected, alloc_cands)
-
- # Now add required traits to the mix and verify we still get the same
- # result (since we haven't yet consumed the second physical function's
- # inventory of SRIOV_NET_VF).
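A standalone sketch of the winnowing the next request performs (simplified; the real trait handling lives in placement's SQL queries, and only the trait-bearing resource is affected):

```python
# For the resource class tied to the required trait, only a provider that
# carries the trait may supply it; pf0 lacks HW_NIC_OFFLOAD_GENEVE, so only
# pf1 can contribute SRIOV_NET_VF to a candidate.
provider_traits = {
    'cn_numa0_pf0': set(),
    'cn_numa1_pf1': {'HW_NIC_OFFLOAD_GENEVE'},
}
required = {'HW_NIC_OFFLOAD_GENEVE'}
eligible = sorted(p for p, t in provider_traits.items() if required <= t)
assert eligible == ['cn_numa1_pf1']
```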
- alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - fields.ResourceClass.VCPU: 2, - fields.ResourceClass.MEMORY_MB: 256, - fields.ResourceClass.SRIOV_NET_VF: 1, - }, - required_traits=set([os_traits.HW_NIC_OFFLOAD_GENEVE]), - )} - ) - - expected = [ - [('cn', fields.ResourceClass.VCPU, 2), - ('cn', fields.ResourceClass.MEMORY_MB, 256), - ('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn': set([ - (fields.ResourceClass.VCPU, 16, 0), - (fields.ResourceClass.MEMORY_MB, 32768, 0), - ]), - 'cn_numa0': set([]), - 'cn_numa1': set([]), - 'cn_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), - ]), - 'cn_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - expected = { - 'cn': set([]), - 'cn_numa0': set([]), - 'cn_numa1': set([]), - 'cn_numa0_pf0': set([]), - 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]), - } - self._validate_provider_summary_traits(expected, alloc_cands) - - # Next we test that we get resources only on non-root providers - # without root providers involved - alloc_cands = self._get_allocation_candidates( - {'': placement_lib.RequestGroup( - use_same_provider=False, - resources={ - fields.ResourceClass.SRIOV_NET_VF: 1, - }, - )} - ) - - expected = [ - [('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)], - [('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)], - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn': set([ - (fields.ResourceClass.VCPU, 16, 0), - (fields.ResourceClass.MEMORY_MB, 32768, 0), - ]), - 'cn_numa0': set([]), - 'cn_numa1': set([]), - 'cn_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), - ]), - 'cn_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0), - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - expected = { - 'cn': set([]), - 'cn_numa0': set([]), - 'cn_numa1': set([]), - 'cn_numa0_pf0': set([]), - 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]), - } - self._validate_provider_summary_traits(expected, alloc_cands) - - # Same, but with the request in a granular group, which hits a - # different code path. 
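To make the "different code path" remark concrete, here is a toy model of the granular semantic, under the simplifying assumption that a granular group must be satisfied by a single provider:

```python
# use_same_provider=True: every resource in the group must come from one
# provider. use_same_provider=False lets resources spread across the tree
# and its sharing providers.
def satisfies_granular(allocation, group_rcs):
    providers = {prov for prov, rc, _amt in allocation if rc in group_rcs}
    return len(providers) == 1

assert satisfies_granular(
    [('cn_numa0_pf0', 'SRIOV_NET_VF', 1)], {'SRIOV_NET_VF'})
assert not satisfies_granular(
    [('cn', 'VCPU', 2), ('ss', 'DISK_GB', 100)], {'VCPU', 'DISK_GB'})
```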
- alloc_cands = self._get_allocation_candidates(
- {'': placement_lib.RequestGroup(
- use_same_provider=True,
- resources={
- fields.ResourceClass.SRIOV_NET_VF: 1,
- },
- )}
- )
-
- expected = [
- [('cn_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1)],
- [('cn_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1)],
- ]
- self._validate_allocation_requests(expected, alloc_cands)
-
- expected = {
- 'cn': set([
- (fields.ResourceClass.VCPU, 16, 0),
- (fields.ResourceClass.MEMORY_MB, 32768, 0),
- ]),
- 'cn_numa0': set([]),
- 'cn_numa1': set([]),
- 'cn_numa0_pf0': set([
- (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
- ]),
- 'cn_numa1_pf1': set([
- (fields.ResourceClass.SRIOV_NET_VF, 8, 0),
- ]),
- }
- self._validate_provider_summary_resources(expected, alloc_cands)
-
- expected = {
- 'cn': set([]),
- 'cn_numa0': set([]),
- 'cn_numa1': set([]),
- 'cn_numa0_pf0': set([]),
- 'cn_numa1_pf1': set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
- }
- self._validate_provider_summary_traits(expected, alloc_cands)
-
- # Now consume all the inventory of SRIOV_NET_VF on the second physical
- # function (the one with HW_NIC_OFFLOAD_GENEVE associated with it) and
- # verify that the same request now returns zero results since the
- # function with the required trait no longer has any inventory.
- self.allocate_from_provider(pf1, fields.ResourceClass.SRIOV_NET_VF, 8)
-
- alloc_cands = self._get_allocation_candidates(
- {'':
- placement_lib.RequestGroup(
- use_same_provider=False,
- resources={
- fields.ResourceClass.VCPU: 2,
- fields.ResourceClass.MEMORY_MB: 256,
- fields.ResourceClass.SRIOV_NET_VF: 1,
- },
- required_traits=set([os_traits.HW_NIC_OFFLOAD_GENEVE]),
- )}
- )
-
- self._validate_allocation_requests([], alloc_cands)
- self._validate_provider_summary_resources({}, alloc_cands)
- self._validate_provider_summary_traits({}, alloc_cands)
-
- def _get_rp_ids_matching_names(self, names):
- """Utility function to look up resource provider IDs from a set of
- supplied provider names directly from the API DB.
- """
- names = map(six.text_type, names)
- sel = sa.select([rp_obj._RP_TBL.c.id])
- sel = sel.where(rp_obj._RP_TBL.c.name.in_(names))
- with self.placement_db.get_engine().connect() as conn:
- rp_ids = set([r[0] for r in conn.execute(sel)])
- return rp_ids
-
- def test_trees_matching_all(self):
- """Creates a few provider trees having different inventories and
- allocations and tests the _get_trees_matching_all() utility
- function to ensure that only the root provider IDs of matching provider
- trees are returned.
- """ - # NOTE(jaypipes): _get_trees_matching_all() expects a dict of resource - # class internal identifiers, not string names - resources = { - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.VCPU): 2, - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.MEMORY_MB): 256, - fields.ResourceClass.STANDARD.index( - fields.ResourceClass.SRIOV_NET_VF): 1, - } - req_traits = {} - forbidden_traits = {} - member_of = [] - sharing = {} - - # Before we even set up any providers, verify that the short-circuits - # work to return empty lists - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - self.assertEqual([], trees) - - # We are setting up 3 trees of providers that look like this: - # - # compute node (cn) - # / \ - # / \ - # numa cell 0 numa cell 1 - # | | - # | | - # pf 0 pf 1 - cn_names = [] - for x in ('1', '2', '3'): - name = 'cn' + x - cn_name = name - cn_names.append(cn_name) - cn = self._create_provider(name) - - tb.add_inventory(cn, fields.ResourceClass.VCPU, 16) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768) - - name = 'cn' + x + '_numa0' - numa_cell0 = self._create_provider(name, parent=cn.uuid) - name = 'cn' + x + '_numa1' - numa_cell1 = self._create_provider(name, parent=cn.uuid) - - name = 'cn' + x + '_numa0_pf0' - pf0 = self._create_provider(name, parent=numa_cell0.uuid) - tb.add_inventory(pf0, fields.ResourceClass.SRIOV_NET_VF, 8) - name = 'cn' + x + '_numa1_pf1' - pf1 = self._create_provider(name, parent=numa_cell1.uuid) - tb.add_inventory(pf1, fields.ResourceClass.SRIOV_NET_VF, 8) - # Mark only the second PF on the third compute node as having - # GENEVE offload enabled - if x == '3': - tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE) - # Doesn't really make a whole lot of logical sense, but allows - # us to test situations where the same trait is associated with - # multiple providers in the same tree and one of the providers - # has inventory we will use... 
- tb.set_traits(cn, os_traits.HW_NIC_OFFLOAD_GENEVE) - - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - # trees is a list of two-tuples of (provider ID, root provider ID) - tree_root_ids = set(p[1] for p in trees) - expect_root_ids = self._get_rp_ids_matching_names(cn_names) - self.assertEqual(expect_root_ids, tree_root_ids) - - # let's validate providers in tree as well - provider_ids = set(p[0] for p in trees) - provider_names = cn_names + ['cn1_numa0_pf0', 'cn1_numa1_pf1', - 'cn2_numa0_pf0', 'cn2_numa1_pf1', - 'cn3_numa0_pf0', 'cn3_numa1_pf1'] - expect_provider_ids = self._get_rp_ids_matching_names(provider_names) - self.assertEqual(expect_provider_ids, provider_ids) - - # OK, now consume all the VFs in the second compute node and verify - # only the first and third computes are returned as root providers from - # _get_trees_matching_all() - cn2_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, - uuids.cn2_numa0_pf0) - self.allocate_from_provider(cn2_pf0, fields.ResourceClass.SRIOV_NET_VF, - 8) - - cn2_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, - uuids.cn2_numa1_pf1) - self.allocate_from_provider(cn2_pf1, fields.ResourceClass.SRIOV_NET_VF, - 8) - - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - tree_root_ids = set(p[1] for p in trees) - self.assertEqual(2, len(tree_root_ids)) - - # cn2 had all its VFs consumed, so we should only get cn1 and cn3's IDs - # as the root provider IDs. - cn_names = ['cn1', 'cn3'] - expect_root_ids = self._get_rp_ids_matching_names(cn_names) - self.assertEqual(expect_root_ids, set(tree_root_ids)) - - # let's validate providers in tree as well - provider_ids = set(p[0] for p in trees) - provider_names = cn_names + ['cn1_numa0_pf0', 'cn1_numa1_pf1', - 'cn3_numa0_pf0', 'cn3_numa1_pf1'] - expect_provider_ids = self._get_rp_ids_matching_names(provider_names) - self.assertEqual(expect_provider_ids, provider_ids) - - # OK, now we're going to add a required trait to the mix. The only - # provider that is decorated with the HW_NIC_OFFLOAD_GENEVE trait is - # the second physical function on the third compute host. So we should - # only get the third compute node back if we require that trait - - geneve_t = rp_obj.Trait.get_by_name( - self.ctx, os_traits.HW_NIC_OFFLOAD_GENEVE) - # required_traits parameter is a dict of trait name to internal ID - req_traits = { - geneve_t.name: geneve_t.id, - } - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - tree_root_ids = set(p[1] for p in trees) - self.assertEqual(1, len(tree_root_ids)) - - cn_names = ['cn3'] - expect_root_ids = self._get_rp_ids_matching_names(cn_names) - self.assertEqual(expect_root_ids, set(tree_root_ids)) - - # let's validate providers in tree as well - provider_ids = set(p[0] for p in trees) - # NOTE(tetsuro): Actually we also get providers without traits here. - # This is reported as bug#1771707 and from users' view the bug is now - # fixed out of this _get_trees_matching_all() function by checking - # traits later again in _check_traits_for_alloc_request(). - # But ideally, we'd like to have only pf1 from cn3 here using SQL - # query in _get_trees_matching_all() function for optimization. 
- # provider_names = cn_names + ['cn3_numa1_pf1'] - provider_names = cn_names + ['cn3_numa0_pf0', 'cn3_numa1_pf1'] - expect_provider_ids = self._get_rp_ids_matching_names(provider_names) - self.assertEqual(expect_provider_ids, provider_ids) - - # Add in a required trait that no provider has associated with it and - # verify that there are no returned allocation candidates - avx2_t = rp_obj.Trait.get_by_name( - self.ctx, os_traits.HW_CPU_X86_AVX2) - # required_traits parameter is a dict of trait name to internal ID - req_traits = { - geneve_t.name: geneve_t.id, - avx2_t.name: avx2_t.id, - } - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - tree_root_ids = set(p[1] for p in trees) - self.assertEqual(0, len(tree_root_ids)) - - # If we add the AVX2 trait as forbidden, not required, then we - # should get back the original cn3 - req_traits = { - geneve_t.name: geneve_t.id, - } - forbidden_traits = { - avx2_t.name: avx2_t.id, - } - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - tree_root_ids = set(p[1] for p in trees) - self.assertEqual(1, len(tree_root_ids)) - - cn_names = ['cn3'] - expect_root_ids = self._get_rp_ids_matching_names(cn_names) - self.assertEqual(expect_root_ids, set(tree_root_ids)) - - # let's validate providers in tree as well - provider_ids = set(p[0] for p in trees) - # NOTE(tetsuro): Actually we also get providers without traits here. - # This is reported as bug#1771707 and from users' view the bug is now - # fixed out of this _get_trees_matching_all() function by checking - # traits later again in _check_traits_for_alloc_request(). - # But ideally, we'd like to have only pf1 from cn3 here using SQL - # query in _get_trees_matching_all() function for optimization. 
- # provider_names = cn_names + ['cn3_numa1_pf1'] - provider_names = cn_names + ['cn3_numa0_pf0', 'cn3_numa1_pf1'] - expect_provider_ids = self._get_rp_ids_matching_names(provider_names) - self.assertEqual(expect_provider_ids, provider_ids) - - # Consume all the VFs in first and third compute nodes and verify - # no more providers are returned - cn1_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, - uuids.cn1_numa0_pf0) - self.allocate_from_provider( - cn1_pf0, fields.ResourceClass.SRIOV_NET_VF, 8) - - cn1_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, - uuids.cn1_numa1_pf1) - self.allocate_from_provider( - cn1_pf1, fields.ResourceClass.SRIOV_NET_VF, 8) - cn3_pf0 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, - uuids.cn3_numa0_pf0) - self.allocate_from_provider( - cn3_pf0, fields.ResourceClass.SRIOV_NET_VF, 8) - - cn3_pf1 = rp_obj.ResourceProvider.get_by_uuid(self.ctx, - uuids.cn3_numa1_pf1) - self.allocate_from_provider( - cn3_pf1, fields.ResourceClass.SRIOV_NET_VF, 8) - - trees = rp_obj._get_trees_matching_all(self.ctx, - resources, req_traits, forbidden_traits, sharing, member_of) - self.assertEqual([], trees) - - def test_simple_tree_with_shared_provider(self): - """Tests that we properly winnow allocation requests when including - shared and nested providers - """ - # We are setting up 2 cn trees with 2 shared storages - # that look like this: - # - # compute node (cn1) ----- shared storage (ss1) - # / \ agg1 with 2000 DISK_GB - # / \ - # numa cell 1_0 numa cell 1_1 - # | | - # | | - # pf 1_0 pf 1_1(HW_NIC_OFFLOAD_GENEVE) - # - # compute node (cn2) ----- shared storage (ss2) - # / \ agg2 with 1000 DISK_GB - # / \ - # numa cell 2_0 numa cell 2_1 - # | | - # | | - # pf 2_0 pf 2_1(HW_NIC_OFFLOAD_GENEVE) - # - # The second physical function in both trees (pf1_1, pf 2_1) will be - # associated with the HW_NIC_OFFLOAD_GENEVE trait, but not the first - # physical function. - # - # We will issue a request to _get_allocation_candidates() for VCPU, - # SRIOV_NET_VF and DISK_GB **without** required traits, then include - # a request that includes HW_NIC_OFFLOAD_GENEVE. In the latter case, - # the compute node tree should be returned but the allocation requests - # should only include the second physical function since the required - # trait is only associated with that PF. 
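Before the setup code, the aggregate wiring in the diagram can be restated as a tiny sketch (names from the diagram; the membership logic is illustrative):

```python
# Sharing providers reach a compute tree through a common aggregate on the
# tree's root, plus the MISC_SHARES_VIA_AGGREGATE trait on the sharer.
aggregates = {'agg1': {'cn1', 'ss1'}, 'agg2': {'cn2', 'ss2'}}
sharers = {'ss1', 'ss2'}

def sharers_for(root):
    return {p for members in aggregates.values()
            if root in members for p in members & sharers}

assert sharers_for('cn1') == {'ss1'}
assert sharers_for('cn2') == {'ss2'}
```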
-
- cn1 = self._create_provider('cn1', uuids.agg1)
- cn2 = self._create_provider('cn2', uuids.agg2)
- tb.add_inventory(cn1, fields.ResourceClass.VCPU, 16)
- tb.add_inventory(cn2, fields.ResourceClass.VCPU, 16)
-
- numa1_0 = self._create_provider('cn1_numa0', parent=cn1.uuid)
- numa1_1 = self._create_provider('cn1_numa1', parent=cn1.uuid)
- numa2_0 = self._create_provider('cn2_numa0', parent=cn2.uuid)
- numa2_1 = self._create_provider('cn2_numa1', parent=cn2.uuid)
-
- pf1_0 = self._create_provider('cn1_numa0_pf0', parent=numa1_0.uuid)
- pf1_1 = self._create_provider('cn1_numa1_pf1', parent=numa1_1.uuid)
- pf2_0 = self._create_provider('cn2_numa0_pf0', parent=numa2_0.uuid)
- pf2_1 = self._create_provider('cn2_numa1_pf1', parent=numa2_1.uuid)
-
- tb.add_inventory(pf1_0, fields.ResourceClass.SRIOV_NET_VF, 8)
- tb.add_inventory(pf1_1, fields.ResourceClass.SRIOV_NET_VF, 8)
- tb.add_inventory(pf2_0, fields.ResourceClass.SRIOV_NET_VF, 8)
- tb.add_inventory(pf2_1, fields.ResourceClass.SRIOV_NET_VF, 8)
- tb.set_traits(pf2_1, os_traits.HW_NIC_OFFLOAD_GENEVE)
- tb.set_traits(pf1_1, os_traits.HW_NIC_OFFLOAD_GENEVE)
-
- ss1 = self._create_provider('ss1', uuids.agg1)
- ss2 = self._create_provider('ss2', uuids.agg2)
- tb.add_inventory(ss1, fields.ResourceClass.DISK_GB, 2000)
- tb.add_inventory(ss2, fields.ResourceClass.DISK_GB, 1000)
- tb.set_traits(ss1, 'MISC_SHARES_VIA_AGGREGATE')
- tb.set_traits(ss2, 'MISC_SHARES_VIA_AGGREGATE')
-
- alloc_cands = self._get_allocation_candidates(
- {'': placement_lib.RequestGroup(
- use_same_provider=False,
- resources={
- fields.ResourceClass.VCPU: 2,
- fields.ResourceClass.SRIOV_NET_VF: 1,
- fields.ResourceClass.DISK_GB: 1500,
- })
- }
- )
-
- # cn2 is not in the allocation candidates because its sharing
- # provider (ss2) does not have enough DISK_GB.
- expected = [
- [('cn1', fields.ResourceClass.VCPU, 2),
- ('cn1_numa0_pf0', fields.ResourceClass.SRIOV_NET_VF, 1),
- ('ss1', fields.ResourceClass.DISK_GB, 1500)],
- [('cn1', fields.ResourceClass.VCPU, 2),
- ('cn1_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1),
- ('ss1', fields.ResourceClass.DISK_GB, 1500)]
- ]
-
- self._validate_allocation_requests(expected, alloc_cands)
-
- expected = {
- 'cn1': set([
- (fields.ResourceClass.VCPU, 16, 0)
- ]),
- 'cn1_numa0': set([]),
- 'cn1_numa1': set([]),
- 'cn1_numa0_pf0': set([
- (fields.ResourceClass.SRIOV_NET_VF, 8, 0)
- ]),
- 'cn1_numa1_pf1': set([
- (fields.ResourceClass.SRIOV_NET_VF, 8, 0)
- ]),
- 'ss1': set([
- (fields.ResourceClass.DISK_GB, 2000, 0)
- ]),
- }
- self._validate_provider_summary_resources(expected, alloc_cands)
-
- # Now add a required trait to the mix and verify that SRIOV_NET_VF
- # is only served by the physical function that has that trait.
- alloc_cands = self._get_allocation_candidates(
- {'': placement_lib.RequestGroup(
- use_same_provider=False,
- resources={
- fields.ResourceClass.VCPU: 2,
- fields.ResourceClass.SRIOV_NET_VF: 1,
- fields.ResourceClass.DISK_GB: 1500,
- },
- required_traits=[os_traits.HW_NIC_OFFLOAD_GENEVE])
- }
- )
-
- # cn1_numa0_pf0 is not in the allocation candidates because it
- # doesn't have the required trait.
- expected = [ - [('cn1', fields.ResourceClass.VCPU, 2), - ('cn1_numa1_pf1', fields.ResourceClass.SRIOV_NET_VF, 1), - ('ss1', fields.ResourceClass.DISK_GB, 1500)] - ] - self._validate_allocation_requests(expected, alloc_cands) - - expected = { - 'cn1': set([ - (fields.ResourceClass.VCPU, 16, 0) - ]), - 'cn1_numa0': set([]), - 'cn1_numa1': set([]), - 'cn1_numa0_pf0': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0) - ]), - 'cn1_numa1_pf1': set([ - (fields.ResourceClass.SRIOV_NET_VF, 8, 0) - ]), - 'ss1': set([ - (fields.ResourceClass.DISK_GB, 2000, 0) - ]), - } - self._validate_provider_summary_resources(expected, alloc_cands) - - def test_get_trees_with_traits(self): - """Creates a few provider trees having different traits and tests the - _get_trees_with_traits() utility function to ensure that only the - root provider IDs of matching traits are returned. - """ - # We are setting up 6 trees of providers with following traits: - # - # compute node (cn) - # / \ - # pf 0 pf 1 - # - # +-----+----------------+---------------------+---------------------+ - # | | cn | pf0 | pf1 | - # +-----+----------------+---------------------+---------------------+ - # |tree1|HW_CPU_X86_AVX2 | |HW_NIC_OFFLOAD_GENEVE| - # +-----+----------------+---------------------+---------------------+ - # |tree2|STORAGE_DISK_SSD| | | - # +-----+----------------+---------------------+---------------------+ - # |tree3|HW_CPU_X86_AVX2 | | | - # | |STORAGE_DISK_SSD| | | - # +-----+----------------+---------------------+---------------------+ - # |tree4| |HW_NIC_ACCEL_SSL | | - # | | |HW_NIC_OFFLOAD_GENEVE| | - # +-----+----------------+---------------------+---------------------+ - # |tree5| |HW_NIC_ACCEL_SSL |HW_NIC_OFFLOAD_GENEVE| - # +-----+----------------+---------------------+---------------------+ - # |tree6| |HW_NIC_ACCEL_SSL |HW_NIC_ACCEL_SSL | - # +-----+----------------+---------------------+---------------------+ - # |tree7| | | | - # +-----+----------------+---------------------+---------------------+ - # - - rp_ids = set() - for x in ('1', '2', '3', '4', '5', '6', '7'): - name = 'cn' + x - cn = self._create_provider(name) - name = 'cn' + x + '_pf0' - pf0 = self._create_provider(name, parent=cn.uuid) - name = 'cn' + x + '_pf1' - pf1 = self._create_provider(name, parent=cn.uuid) - - rp_ids |= set([cn.id, pf0.id, pf1.id]) - - if x == '1': - tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2) - tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE) - if x == '2': - tb.set_traits(cn, os_traits.STORAGE_DISK_SSD) - if x == '3': - tb.set_traits(cn, os_traits.HW_CPU_X86_AVX2, - os_traits.STORAGE_DISK_SSD) - if x == '4': - tb.set_traits(pf0, os_traits.HW_NIC_ACCEL_SSL, - os_traits.HW_NIC_OFFLOAD_GENEVE) - if x == '5': - tb.set_traits(pf0, os_traits.HW_NIC_ACCEL_SSL) - tb.set_traits(pf1, os_traits.HW_NIC_OFFLOAD_GENEVE) - if x == '6': - tb.set_traits(pf0, os_traits.HW_NIC_ACCEL_SSL) - tb.set_traits(pf1, os_traits.HW_NIC_ACCEL_SSL) - - avx2_t = rp_obj.Trait.get_by_name( - self.ctx, os_traits.HW_CPU_X86_AVX2) - ssd_t = rp_obj.Trait.get_by_name( - self.ctx, os_traits.STORAGE_DISK_SSD) - geneve_t = rp_obj.Trait.get_by_name( - self.ctx, os_traits.HW_NIC_OFFLOAD_GENEVE) - ssl_t = rp_obj.Trait.get_by_name( - self.ctx, os_traits.HW_NIC_ACCEL_SSL) - - # Case1: required on root - required_traits = { - avx2_t.name: avx2_t.id, - } - forbidden_traits = {} - - rp_tuples_with_trait = rp_obj._get_trees_with_traits( - self.ctx, rp_ids, required_traits, forbidden_traits) - - tree_root_ids = set([p[1] for p in rp_tuples_with_trait]) - - provider_names 
= ['cn1', 'cn3']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids)
-
- # Case1': required on root with forbidden traits
- # Let's validate that cn3 disappears
- required_traits = {
- avx2_t.name: avx2_t.id,
- }
- forbidden_traits = {
- ssd_t.name: ssd_t.id,
- }
-
- rp_tuples_with_trait = rp_obj._get_trees_with_traits(
- self.ctx, rp_ids, required_traits, forbidden_traits)
-
- tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
-
- provider_names = ['cn1']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids)
-
- # Case2: multiple required on root
- required_traits = {
- avx2_t.name: avx2_t.id,
- ssd_t.name: ssd_t.id
- }
- forbidden_traits = {}
-
- rp_tuples_with_trait = rp_obj._get_trees_with_traits(
- self.ctx, rp_ids, required_traits, forbidden_traits)
-
- tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
-
- provider_names = ['cn3']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids)
-
- # Case3: required on child
- required_traits = {
- geneve_t.name: geneve_t.id
- }
- forbidden_traits = {}
-
- rp_tuples_with_trait = rp_obj._get_trees_with_traits(
- self.ctx, rp_ids, required_traits, forbidden_traits)
-
- tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
-
- provider_names = ['cn1', 'cn4', 'cn5']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids)
-
- # Case3': required on child with forbidden traits
- # Let's validate that cn4 disappears
- required_traits = {
- geneve_t.name: geneve_t.id
- }
- forbidden_traits = {
- ssl_t.name: ssl_t.id
- }
-
- rp_tuples_with_trait = rp_obj._get_trees_with_traits(
- self.ctx, rp_ids, required_traits, forbidden_traits)
-
- tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
-
- provider_names = ['cn1', 'cn5']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids)
-
- # Case4: multiple required on child
- required_traits = {
- geneve_t.name: geneve_t.id,
- ssl_t.name: ssl_t.id
- }
- forbidden_traits = {}
-
- rp_tuples_with_trait = rp_obj._get_trees_with_traits(
- self.ctx, rp_ids, required_traits, forbidden_traits)
-
- tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
-
- provider_names = ['cn4', 'cn5']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids)
-
- # Case5: required on root and child
- required_traits = {
- avx2_t.name: avx2_t.id,
- geneve_t.name: geneve_t.id
- }
- forbidden_traits = {}
-
- rp_tuples_with_trait = rp_obj._get_trees_with_traits(
- self.ctx, rp_ids, required_traits, forbidden_traits)
-
- tree_root_ids = set([p[1] for p in rp_tuples_with_trait])
-
- provider_names = ['cn1']
- expect_root_ids = self._get_rp_ids_matching_names(provider_names)
- self.assertEqual(expect_root_ids, tree_root_ids) diff --git a/nova/tests/functional/api/openstack/placement/db/test_base.py b/nova/tests/functional/api/openstack/placement/db/test_base.py deleted file mode 100644 index f5e72932ed4b..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_base.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License.
You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Base class and convenience utilities for functional placement tests."""
-
-from oslo_utils.fixture import uuidsentinel as uuids
-from oslo_utils import uuidutils
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement.objects import consumer as consumer_obj
-from nova.api.openstack.placement.objects import project as project_obj
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.objects import user as user_obj
-from nova.tests.functional.api.openstack.placement import base
-
-
-def create_provider(context, name, *aggs, **kwargs):
- parent = kwargs.get('parent')
- root = kwargs.get('root')
- uuid = kwargs.get('uuid', getattr(uuids, name))
- rp = rp_obj.ResourceProvider(context, name=name, uuid=uuid)
- if parent:
- rp.parent_provider_uuid = parent
- if root:
- rp.root_provider_uuid = root
- rp.create()
- if aggs:
- rp.set_aggregates(aggs)
- return rp
-
-
-def add_inventory(rp, rc, total, **kwargs):
- kwargs.setdefault('max_unit', total)
- inv = rp_obj.Inventory(rp._context, resource_provider=rp,
- resource_class=rc, total=total, **kwargs)
- inv.obj_set_defaults()
- rp.add_inventory(inv)
- return inv
-
-
-def set_traits(rp, *traits):
- tlist = []
- for tname in traits:
- try:
- trait = rp_obj.Trait.get_by_name(rp._context, tname)
- except exception.TraitNotFound:
- trait = rp_obj.Trait(rp._context, name=tname)
- trait.create()
- tlist.append(trait)
- rp.set_traits(rp_obj.TraitList(objects=tlist))
- return tlist
-
-
-def ensure_consumer(ctx, user, project, consumer_id=None):
- # NOTE(efried): If not specified, use a random consumer UUID - we don't
- # want to override any existing allocations from the test case.
- consumer_id = consumer_id or uuidutils.generate_uuid()
- try:
- consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_id)
- except exception.NotFound:
- consumer = consumer_obj.Consumer(
- ctx, uuid=consumer_id, user=user, project=project)
- consumer.create()
- return consumer
-
-
-def set_allocation(ctx, rp, consumer, rc_used_dict):
- alloc = [
- rp_obj.Allocation(
- ctx, resource_provider=rp, resource_class=rc,
- consumer=consumer, used=used)
- for rc, used in rc_used_dict.items()
- ]
- alloc_list = rp_obj.AllocationList(ctx, objects=alloc)
- alloc_list.replace_all()
- return alloc_list
-
-
-class PlacementDbBaseTestCase(base.TestCase):
-
- def setUp(self):
- super(PlacementDbBaseTestCase, self).setUp()
- # we use context in some places and ctx in others. We should only use
- # context, but let's paper over that for now.
- self.ctx = self.context
- self.user_obj = user_obj.User(self.ctx, external_id='fake-user')
- self.user_obj.create()
- self.project_obj = project_obj.Project(
- self.ctx, external_id='fake-project')
- self.project_obj.create()
- # For debugging purposes, populated by _create_provider and used by
- # _validate_allocation_requests to make failure results more readable.
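An illustrative aside on why that map earns its keep (hypothetical usage, not part of the removed fixture):

```python
from oslo_utils.fixture import uuidsentinel as uuids

# Translating provider UUIDs back to names before asserting means a
# failing comparison shows ('cn1', 'VCPU', 2) rather than a raw UUID.
rp_uuid_to_name = {uuids.cn1: 'cn1', uuids.ss1: 'ss1'}

def named(allocations):
    return [(rp_uuid_to_name.get(rp, rp), rc, amount)
            for rp, rc, amount in allocations]

assert named([(uuids.cn1, 'VCPU', 2)]) == [('cn1', 'VCPU', 2)]
```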
- self.rp_uuid_to_name = {} - - def _create_provider(self, name, *aggs, **kwargs): - rp = create_provider(self.ctx, name, *aggs, **kwargs) - self.rp_uuid_to_name[rp.uuid] = name - return rp - - def allocate_from_provider(self, rp, rc, used, consumer_id=None, - consumer=None): - if consumer is None: - consumer = ensure_consumer( - self.ctx, self.user_obj, self.project_obj, consumer_id) - alloc_list = set_allocation(self.ctx, rp, consumer, {rc: used}) - return alloc_list - - def _make_allocation(self, inv_dict, alloc_dict): - rp = self._create_provider('allocation_resource_provider') - disk_inv = rp_obj.Inventory(context=self.ctx, - resource_provider=rp, **inv_dict) - inv_list = rp_obj.InventoryList(objects=[disk_inv]) - rp.set_inventory(inv_list) - consumer_id = alloc_dict['consumer_id'] - consumer = ensure_consumer( - self.ctx, self.user_obj, self.project_obj, consumer_id) - alloc = rp_obj.Allocation(self.ctx, resource_provider=rp, - consumer=consumer, **alloc_dict) - alloc_list = rp_obj.AllocationList(self.ctx, objects=[alloc]) - alloc_list.replace_all() - return rp, alloc diff --git a/nova/tests/functional/api/openstack/placement/db/test_consumer.py b/nova/tests/functional/api/openstack/placement/db/test_consumer.py deleted file mode 100644 index 184b01d99c65..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_consumer.py +++ /dev/null @@ -1,329 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_utils.fixture import uuidsentinel as uuids -import sqlalchemy as sa - -from nova.api.openstack.placement import db_api -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import consumer as consumer_obj -from nova.api.openstack.placement.objects import project as project_obj -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.objects import user as user_obj -from nova import rc_fields as fields -from nova.tests.functional.api.openstack.placement import base -from nova.tests.functional.api.openstack.placement.db import test_base as tb - - -CONF = cfg.CONF -CONSUMER_TBL = consumer_obj.CONSUMER_TBL -PROJECT_TBL = project_obj.PROJECT_TBL -USER_TBL = user_obj.USER_TBL -ALLOC_TBL = rp_obj._ALLOC_TBL - - -class ConsumerTestCase(tb.PlacementDbBaseTestCase): - def test_non_existing_consumer(self): - self.assertRaises(exception.ConsumerNotFound, - consumer_obj.Consumer.get_by_uuid, self.ctx, - uuids.non_existing_consumer) - - def test_create_and_get(self): - u = user_obj.User(self.ctx, external_id='another-user') - u.create() - p = project_obj.Project(self.ctx, external_id='another-project') - p.create() - c = consumer_obj.Consumer( - self.ctx, uuid=uuids.consumer, user=u, project=p) - c.create() - c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer) - self.assertEqual(1, c.id) - # Project ID == 1 is fake-project created in setup - self.assertEqual(2, c.project.id) - # User ID == 1 is fake-user created in setup - self.assertEqual(2, c.user.id) - self.assertRaises(exception.ConsumerExists, c.create) - - def test_update(self): - """Tests the scenario where a user supplies a different project/user ID - for an allocation's consumer and we call Consumer.update() to save that - information to the consumers table. - """ - # First, create the consumer with the "fake-user" and "fake-project" - # user/project in the base test class's setUp - c = consumer_obj.Consumer( - self.ctx, uuid=uuids.consumer, user=self.user_obj, - project=self.project_obj) - c.create() - c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer) - self.assertEqual(self.project_obj.id, c.project.id) - self.assertEqual(self.user_obj.id, c.user.id) - - # Now change the consumer's project and user to a different project/user - another_user = user_obj.User(self.ctx, external_id='another-user') - another_user.create() - another_proj = project_obj.Project( - self.ctx, external_id='another-project') - another_proj.create() - - c.project = another_proj - c.user = another_user - c.update() - c = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer) - self.assertEqual(another_proj.id, c.project.id) - self.assertEqual(another_user.id, c.user.id) - - -@db_api.placement_context_manager.reader -def _get_allocs_with_no_consumer_relationship(ctx): - alloc_to_consumer = sa.outerjoin( - ALLOC_TBL, CONSUMER_TBL, - ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid) - sel = sa.select([ALLOC_TBL.c.consumer_id]) - sel = sel.select_from(alloc_to_consumer) - sel = sel.where(CONSUMER_TBL.c.id.is_(None)) - return ctx.session.execute(sel).fetchall() - - -# NOTE(jaypipes): The tb.PlacementDbBaseTestCase creates a project and user -# which is why we don't base off that. We want a completely bare DB for this -# test.
-class CreateIncompleteConsumersTestCase(base.TestCase): - - def setUp(self): - super(CreateIncompleteConsumersTestCase, self).setUp() - self.ctx = self.context - - @db_api.placement_context_manager.writer - def _create_incomplete_allocations(self, ctx, num_of_consumer_allocs=1): - # Create some allocations with consumers that don't exist in the - # consumers table to represent old allocations that we expect to be - # "cleaned up" with consumers table records that point to the sentinel - # project/user records. - c1_missing_uuid = uuids.c1_missing - c2_missing_uuid = uuids.c2_missing - c3_missing_uuid = uuids.c3_missing - for c_uuid in (c1_missing_uuid, c2_missing_uuid, c3_missing_uuid): - # Create $num_of_consumer_allocs allocations per consumer with - # different resource classes. - for resource_class_id in range(num_of_consumer_allocs): - ins_stmt = ALLOC_TBL.insert().values( - resource_provider_id=1, - resource_class_id=resource_class_id, - consumer_id=c_uuid, used=1) - ctx.session.execute(ins_stmt) - # Verify there are no records in the projects/users table - project_count = ctx.session.scalar( - sa.select([sa.func.count('*')]).select_from(PROJECT_TBL)) - self.assertEqual(0, project_count) - user_count = ctx.session.scalar( - sa.select([sa.func.count('*')]).select_from(USER_TBL)) - self.assertEqual(0, user_count) - # Verify there are no consumer records for the missing consumers - sel = CONSUMER_TBL.select( - CONSUMER_TBL.c.uuid.in_([c1_missing_uuid, c2_missing_uuid])) - res = ctx.session.execute(sel).fetchall() - self.assertEqual(0, len(res)) - - @db_api.placement_context_manager.reader - def _check_incomplete_consumers(self, ctx): - incomplete_project_id = CONF.placement.incomplete_consumer_project_id - - # Verify we have a record in projects for the missing sentinel - sel = PROJECT_TBL.select( - PROJECT_TBL.c.external_id == incomplete_project_id) - rec = ctx.session.execute(sel).first() - self.assertEqual(incomplete_project_id, rec['external_id']) - incomplete_proj_id = rec['id'] - - # Verify we have a record in users for the missing sentinel - incomplete_user_id = CONF.placement.incomplete_consumer_user_id - sel = user_obj.USER_TBL.select( - USER_TBL.c.external_id == incomplete_user_id) - rec = ctx.session.execute(sel).first() - self.assertEqual(incomplete_user_id, rec['external_id']) - incomplete_user_id = rec['id'] - - # Verify there are records in the consumers table for our old - # allocation records created in the pre-migration setup and that the - # projects and users referenced in those consumer records point to the - # incomplete project/user - sel = CONSUMER_TBL.select(CONSUMER_TBL.c.uuid == uuids.c1_missing) - missing_c1 = ctx.session.execute(sel).first() - self.assertEqual(incomplete_proj_id, missing_c1['project_id']) - self.assertEqual(incomplete_user_id, missing_c1['user_id']) - sel = CONSUMER_TBL.select(CONSUMER_TBL.c.uuid == uuids.c2_missing) - missing_c2 = ctx.session.execute(sel).first() - self.assertEqual(incomplete_proj_id, missing_c2['project_id']) - self.assertEqual(incomplete_user_id, missing_c2['user_id']) - - # Ensure there are no more allocations with incomplete consumers - res = _get_allocs_with_no_consumer_relationship(ctx) - self.assertEqual(0, len(res)) - - def test_create_incomplete_consumers(self): - """Test the online data migration that creates incomplete consumer - records along with the incomplete consumer project/user records. 
- """ - self._create_incomplete_allocations(self.ctx) - # We do a "really online" online data migration for incomplete - # consumers when calling AllocationList.get_all_by_consumer_id() and - # AllocationList.get_all_by_resource_provider() and there are still - # incomplete consumer records. So, to simulate a situation where the - # operator has yet to run the nova-manage online_data_migration CLI - # tool completely, we first call - # consumer_obj.create_incomplete_consumers() with a batch size of 1. - # This should mean there will be two allocation records still remaining - # with a missing consumer record (since we create 3 total to begin - # with). We then query the allocations table directly to grab that - # consumer UUID in the allocations table that doesn't refer to a - # consumer table record and call - # AllocationList.get_all_by_consumer_id() with that consumer UUID. This - # should create the remaining missing consumer record "inline" in the - # AllocationList.get_all_by_consumer_id() method. - # After that happens, there should still be a single allocation record - # that is missing a relation to the consumers table. We call the - # AllocationList.get_all_by_resource_provider() method and verify that - # method cleans up the remaining incomplete consumers relationship. - res = consumer_obj.create_incomplete_consumers(self.ctx, 1) - self.assertEqual((1, 1), res) - - # Grab the consumer UUID for the allocation record with a - # still-incomplete consumer record. - res = _get_allocs_with_no_consumer_relationship(self.ctx) - self.assertEqual(2, len(res)) - still_missing = res[0][0] - rp_obj.AllocationList.get_all_by_consumer_id(self.ctx, still_missing) - - # There should still be a single missing consumer relationship. Let's - # grab that and call AllocationList.get_all_by_resource_provider() - # which should clean that last one up for us. - res = _get_allocs_with_no_consumer_relationship(self.ctx) - self.assertEqual(1, len(res)) - still_missing = res[0][0] - rp1 = rp_obj.ResourceProvider(self.ctx, id=1) - rp_obj.AllocationList.get_all_by_resource_provider(self.ctx, rp1) - - # get_all_by_resource_provider() should have auto-completed the still - # missing consumer record and _check_incomplete_consumers() should - # assert correctly that there are no more incomplete consumer records. - self._check_incomplete_consumers(self.ctx) - res = consumer_obj.create_incomplete_consumers(self.ctx, 10) - self.assertEqual((0, 0), res) - - def test_create_incomplete_consumers_multiple_allocs_per_consumer(self): - """Tests that missing consumer records are created when listing - allocations against a resource provider or running the online data - migration routine when the consumers have multiple allocations on the - same provider. - """ - self._create_incomplete_allocations(self.ctx, num_of_consumer_allocs=2) - # Run the online data migration to migrate one consumer. The batch size - # needs to be large enough to hit more than one consumer for this test - # where each consumer has two allocations. - res = consumer_obj.create_incomplete_consumers(self.ctx, 2) - self.assertEqual((2, 2), res) - # Migrate the rest by listing allocations on the resource provider. 
- rp1 = rp_obj.ResourceProvider(self.ctx, id=1) - rp_obj.AllocationList.get_all_by_resource_provider(self.ctx, rp1) - self._check_incomplete_consumers(self.ctx) - res = consumer_obj.create_incomplete_consumers(self.ctx, 10) - self.assertEqual((0, 0), res) - - -class DeleteConsumerIfNoAllocsTestCase(tb.PlacementDbBaseTestCase): - def test_delete_consumer_if_no_allocs(self): - """AllocationList.replace_all() should attempt to delete consumers that - no longer have any allocations. Due to the REST API not having any way - to query for consumers directly (only via the GET - /allocations/{consumer_uuid} endpoint which returns an empty dict even - when no consumer record exists for the {consumer_uuid}) we need to do - this functional test using only the object layer. - """ - # We will use two consumers in this test, only one of which will get - # all of its allocations deleted in a transaction (and we expect that - # consumer record to be deleted) - c1 = consumer_obj.Consumer( - self.ctx, uuid=uuids.consumer1, user=self.user_obj, - project=self.project_obj) - c1.create() - c2 = consumer_obj.Consumer( - self.ctx, uuid=uuids.consumer2, user=self.user_obj, - project=self.project_obj) - c2.create() - - # Create some inventory that we will allocate - cn1 = self._create_provider('cn1') - tb.add_inventory(cn1, fields.ResourceClass.VCPU, 8) - tb.add_inventory(cn1, fields.ResourceClass.MEMORY_MB, 2048) - tb.add_inventory(cn1, fields.ResourceClass.DISK_GB, 2000) - - # Now allocate some of that inventory to two different consumers - allocs = [ - rp_obj.Allocation( - self.ctx, consumer=c1, resource_provider=cn1, - resource_class=fields.ResourceClass.VCPU, used=1), - rp_obj.Allocation( - self.ctx, consumer=c1, resource_provider=cn1, - resource_class=fields.ResourceClass.MEMORY_MB, used=512), - rp_obj.Allocation( - self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.VCPU, used=1), - rp_obj.Allocation( - self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.MEMORY_MB, used=512), - ] - alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs) - alloc_list.replace_all() - - # Validate that we have consumer records for both consumers - for c_uuid in (uuids.consumer1, uuids.consumer2): - c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, c_uuid) - self.assertIsNotNone(c_obj) - - # OK, now "remove" the allocation for consumer2 by setting the used - # value for both allocated resources to 0 and re-running the - # AllocationList.replace_all(). This should end up deleting the - # consumer record for consumer2 - allocs = [ - rp_obj.Allocation( - self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.VCPU, used=0), - rp_obj.Allocation( - self.ctx, consumer=c2, resource_provider=cn1, - resource_class=fields.ResourceClass.MEMORY_MB, used=0), - ] - alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs) - alloc_list.replace_all() - - # consumer1 should still exist... - c_obj = consumer_obj.Consumer.get_by_uuid(self.ctx, uuids.consumer1) - self.assertIsNotNone(c_obj) - - # but not consumer2... - self.assertRaises( - exception.NotFound, consumer_obj.Consumer.get_by_uuid, - self.ctx, uuids.consumer2) - - # DELETE /allocations/{consumer_uuid} is the other place where we - # delete all allocations for a consumer. 
Let's delete all for consumer1 - # and check that the consumer record is deleted - alloc_list = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, uuids.consumer1) - alloc_list.delete_all() - - # consumer1 should no longer exist in the DB since we just deleted all - # of its allocations - self.assertRaises( - exception.NotFound, consumer_obj.Consumer.get_by_uuid, - self.ctx, uuids.consumer1) diff --git a/nova/tests/functional/api/openstack/placement/db/test_project.py b/nova/tests/functional/api/openstack/placement/db/test_project.py deleted file mode 100644 index 93e373cc8870..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_project.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -from oslo_utils.fixture import uuidsentinel as uuids - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import project as project_obj -from nova.tests.functional.api.openstack.placement.db import test_base as tb - - -class ProjectTestCase(tb.PlacementDbBaseTestCase): - def test_non_existing_project(self): - self.assertRaises( - exception.ProjectNotFound, project_obj.Project.get_by_external_id, - self.ctx, uuids.non_existing_project) - - def test_create_and_get(self): - p = project_obj.Project(self.ctx, external_id='another-project') - p.create() - p = project_obj.Project.get_by_external_id(self.ctx, 'another-project') - # Project ID == 1 is fake-project created in setup - self.assertEqual(2, p.id) - self.assertRaises(exception.ProjectExists, p.create) diff --git a/nova/tests/functional/api/openstack/placement/db/test_reshape.py b/nova/tests/functional/api/openstack/placement/db/test_reshape.py deleted file mode 100644 index 86839ac1875a..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_reshape.py +++ /dev/null @@ -1,359 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
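Before the deleted reshape tests, the shape of the central call is worth spelling out: rp_obj.reshape() takes a dict mapping each ResourceProvider object to its complete replacement InventoryList, plus a single AllocationList carrying the full post-reshape allocations for every affected consumer. A condensed sketch of that call follows, assuming a cn1 provider and an i1_consumer created as in the tests below; only the argument structure is the point, and the values mirror the test bodies.

# Condensed sketch of the reshape() call shape exercised in the tests below.
after_inventories = {
    # Each provider maps to the *entire* inventory it should end up with.
    cn1: rp_obj.InventoryList(ctx, objects=[
        rp_obj.Inventory(
            ctx, resource_provider=cn1, resource_class='MEMORY_MB',
            total=32768, reserved=0, min_unit=1, max_unit=32768,
            step_size=1, allocation_ratio=1.0),
    ]),
}
# One AllocationList covering all consumers involved in the reshape.
after_allocs = rp_obj.AllocationList(ctx, objects=[
    rp_obj.Allocation(
        ctx, resource_provider=cn1, resource_class='MEMORY_MB',
        consumer=i1_consumer, used=1024),
])
# Inventories and allocations are swapped atomically, guarded by provider
# and consumer generations; a mismatch raises ConcurrentUpdateDetected.
rp_obj.reshape(ctx, after_inventories, after_allocs)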
-from oslo_utils.fixture import uuidsentinel as uuids - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import consumer as consumer_obj -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.tests.functional.api.openstack.placement.db import test_base as tb - - -def alloc_for_rc(alloc_list, rc): - for alloc in alloc_list: - if alloc.resource_class == rc: - return alloc - - -class ReshapeTestCase(tb.PlacementDbBaseTestCase): - """Test 'replace the world' reshape transaction.""" - - def test_reshape(self): - """We set up the following scenario: - - BEFORE: single compute node setup - - A single compute node with: - - VCPU, MEMORY_MB, DISK_GB inventory - - Two instances consuming CPU, RAM and DISK from that compute node - - AFTER: hierarchical + shared storage setup - - A compute node parent provider with: - - MEMORY_MB - Two NUMA node child providers containing: - - VCPU - Shared storage provider with: - - DISK_GB - Both instances have their resources split among the providers and - shared storage accordingly - """ - # First create our consumers - i1_uuid = uuids.instance1 - i1_consumer = consumer_obj.Consumer( - self.ctx, uuid=i1_uuid, user=self.user_obj, - project=self.project_obj) - i1_consumer.create() - - i2_uuid = uuids.instance2 - i2_consumer = consumer_obj.Consumer( - self.ctx, uuid=i2_uuid, user=self.user_obj, - project=self.project_obj) - i2_consumer.create() - - cn1 = self._create_provider('cn1') - tb.add_inventory(cn1, 'VCPU', 16) - tb.add_inventory(cn1, 'MEMORY_MB', 32768) - tb.add_inventory(cn1, 'DISK_GB', 1000) - - # Allocate both instances against the single compute node - for consumer in (i1_consumer, i2_consumer): - allocs = [ - rp_obj.Allocation( - self.ctx, resource_provider=cn1, - resource_class='VCPU', consumer=consumer, used=2), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, - resource_class='MEMORY_MB', consumer=consumer, used=1024), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, - resource_class='DISK_GB', consumer=consumer, used=100), - ] - alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs) - alloc_list.replace_all() - - # Verify we have the allocations we expect for the BEFORE scenario - before_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, i1_uuid) - self.assertEqual(3, len(before_allocs_i1)) - self.assertEqual(cn1.uuid, before_allocs_i1[0].resource_provider.uuid) - before_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, i2_uuid) - self.assertEqual(3, len(before_allocs_i2)) - self.assertEqual(cn1.uuid, before_allocs_i2[2].resource_provider.uuid) - - # Before we issue the actual reshape() call, we need to first create - # the child providers and sharing storage provider. These are actions - # that the virt driver or external agent is responsible for performing - # *before* attempting any reshape activity. - cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid) - cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid) - ss = self._create_provider('ss') - - # OK, now emulate the call to POST /reshaper that will be triggered by - # a virt driver wanting to replace the world and change its modeling - # from a single provider to a nested provider tree along with a sharing - # storage provider. 
- after_inventories = { - # cn1 keeps the RAM only - cn1: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=cn1, - resource_class='MEMORY_MB', total=32768, reserved=0, - max_unit=32768, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - # each NUMA node gets half of the CPUs - cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=cn1_numa0, - resource_class='VCPU', total=8, reserved=0, - max_unit=8, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=cn1_numa1, - resource_class='VCPU', total=8, reserved=0, - max_unit=8, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - # The sharing provider gets a bunch of disk - ss: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=ss, - resource_class='DISK_GB', total=100000, reserved=0, - max_unit=1000, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - } - # We do a fetch from the DB for each instance to get its latest - # generation. This would be done by the resource tracker or scheduler - # report client before issuing the call to reshape() because the - # consumers representing the two instances above will have had their - # generations incremented in the original call to PUT - # /allocations/{consumer_uuid} - i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid) - i2_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i2_uuid) - after_allocs = rp_obj.AllocationList(self.ctx, objects=[ - # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB - # from the sharing storage provider - rp_obj.Allocation( - self.ctx, resource_provider=cn1_numa0, resource_class='VCPU', - consumer=i1_consumer, used=2), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, resource_class='MEMORY_MB', - consumer=i1_consumer, used=1024), - rp_obj.Allocation( - self.ctx, resource_provider=ss, resource_class='DISK_GB', - consumer=i1_consumer, used=100), - # instance2 gets VCPU from NUMA1, MEMORY_MB from cn1 and DISK_GB - # from the sharing storage provider - rp_obj.Allocation( - self.ctx, resource_provider=cn1_numa1, resource_class='VCPU', - consumer=i2_consumer, used=2), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, resource_class='MEMORY_MB', - consumer=i2_consumer, used=1024), - rp_obj.Allocation( - self.ctx, resource_provider=ss, resource_class='DISK_GB', - consumer=i2_consumer, used=100), - ]) - rp_obj.reshape(self.ctx, after_inventories, after_allocs) - - # Verify that the inventories have been moved to the appropriate - # providers in the AFTER scenario - - # The root compute node should only have MEMORY_MB, nothing else - cn1_inv = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, cn1) - self.assertEqual(1, len(cn1_inv)) - self.assertEqual('MEMORY_MB', cn1_inv[0].resource_class) - self.assertEqual(32768, cn1_inv[0].total) - # Each NUMA node should only have half the original VCPU, nothing else - numa0_inv = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, cn1_numa0) - self.assertEqual(1, len(numa0_inv)) - self.assertEqual('VCPU', numa0_inv[0].resource_class) - self.assertEqual(8, numa0_inv[0].total) - numa1_inv = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, cn1_numa1) - self.assertEqual(1, len(numa1_inv)) - self.assertEqual('VCPU', numa1_inv[0].resource_class) - self.assertEqual(8, numa1_inv[0].total) - # The sharing storage 
provider should only have DISK_GB, nothing else - ss_inv = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, ss) - self.assertEqual(1, len(ss_inv)) - self.assertEqual('DISK_GB', ss_inv[0].resource_class) - self.assertEqual(100000, ss_inv[0].total) - - # Verify we have the allocations we expect for the AFTER scenario - after_allocs_i1 = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, i1_uuid) - self.assertEqual(3, len(after_allocs_i1)) - # Our VCPU allocation should be in the NUMA0 node - vcpu_alloc = alloc_for_rc(after_allocs_i1, 'VCPU') - self.assertIsNotNone(vcpu_alloc) - self.assertEqual(cn1_numa0.uuid, vcpu_alloc.resource_provider.uuid) - # Our DISK_GB allocation should be in the sharing provider - disk_alloc = alloc_for_rc(after_allocs_i1, 'DISK_GB') - self.assertIsNotNone(disk_alloc) - self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid) - # And our MEMORY_MB should remain on the root compute node - ram_alloc = alloc_for_rc(after_allocs_i1, 'MEMORY_MB') - self.assertIsNotNone(ram_alloc) - self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid) - - after_allocs_i2 = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, i2_uuid) - self.assertEqual(3, len(after_allocs_i2)) - # Our VCPU allocation should be in the NUMA1 node - vcpu_alloc = alloc_for_rc(after_allocs_i2, 'VCPU') - self.assertIsNotNone(vcpu_alloc) - self.assertEqual(cn1_numa1.uuid, vcpu_alloc.resource_provider.uuid) - # Our DISK_GB allocation should be in the sharing provider - disk_alloc = alloc_for_rc(after_allocs_i2, 'DISK_GB') - self.assertIsNotNone(disk_alloc) - self.assertEqual(ss.uuid, disk_alloc.resource_provider.uuid) - # And our MEMORY_MB should remain on the root compute node - ram_alloc = alloc_for_rc(after_allocs_i2, 'MEMORY_MB') - self.assertIsNotNone(ram_alloc) - self.assertEqual(cn1.uuid, ram_alloc.resource_provider.uuid) - - def test_reshape_concurrent_inventory_update(self): - """Valid failure scenario for reshape(). We test a situation where the - virt driver has constructed its "after inventories and allocations" - and sent those to the POST /reshape endpoint. The reshape POST handler - does a quick check of the resource provider generations sent in the - payload and they all check out. - - However, right before the call to resource_provider.reshape(), another - thread legitimately changes the inventory of one of the providers - involved in the reshape transaction. We should get a - ConcurrentUpdateDetected in this case. - """ - # First create our consumers - i1_uuid = uuids.instance1 - i1_consumer = consumer_obj.Consumer( - self.ctx, uuid=i1_uuid, user=self.user_obj, - project=self.project_obj) - i1_consumer.create() - - # then all our original providers - cn1 = self._create_provider('cn1') - tb.add_inventory(cn1, 'VCPU', 16) - tb.add_inventory(cn1, 'MEMORY_MB', 32768) - tb.add_inventory(cn1, 'DISK_GB', 1000) - - # Allocate an instance on our compute node - allocs = [ - rp_obj.Allocation( - self.ctx, resource_provider=cn1, - resource_class='VCPU', consumer=i1_consumer, used=2), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, - resource_class='MEMORY_MB', consumer=i1_consumer, used=1024), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, - resource_class='DISK_GB', consumer=i1_consumer, used=100), - ] - alloc_list = rp_obj.AllocationList(self.ctx, objects=allocs) - alloc_list.replace_all() - - # Before we issue the actual reshape() call, we need to first create - # the child providers and sharing storage provider.
These are actions - # that the virt driver or external agent is responsible for performing - # *before* attempting any reshape activity. - cn1_numa0 = self._create_provider('cn1_numa0', parent=cn1.uuid) - cn1_numa1 = self._create_provider('cn1_numa1', parent=cn1.uuid) - ss = self._create_provider('ss') - - # OK, now emulate the call to POST /reshaper that will be triggered by - # a virt driver wanting to replace the world and change its modeling - # from a single provider to a nested provider tree along with a sharing - # storage provider. - after_inventories = { - # cn1 keeps the RAM only - cn1: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=cn1, - resource_class='MEMORY_MB', total=32768, reserved=0, - max_unit=32768, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - # each NUMA node gets half of the CPUs - cn1_numa0: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=cn1_numa0, - resource_class='VCPU', total=8, reserved=0, - max_unit=8, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - cn1_numa1: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=cn1_numa1, - resource_class='VCPU', total=8, reserved=0, - max_unit=8, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - # The sharing provider gets a bunch of disk - ss: rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=ss, - resource_class='DISK_GB', total=100000, reserved=0, - max_unit=1000, min_unit=1, step_size=1, - allocation_ratio=1.0), - ]), - } - # We do a fetch from the DB for each instance to get its latest - # generation. This would be done by the resource tracker or scheduler - # report client before issuing the call to reshape() because the - # consumers representing the two instances above will have had their - # generations incremented in the original call to PUT - # /allocations/{consumer_uuid} - i1_consumer = consumer_obj.Consumer.get_by_uuid(self.ctx, i1_uuid) - after_allocs = rp_obj.AllocationList(self.ctx, objects=[ - # instance1 gets VCPU from NUMA0, MEMORY_MB from cn1 and DISK_GB - # from the sharing storage provider - rp_obj.Allocation( - self.ctx, resource_provider=cn1_numa0, resource_class='VCPU', - consumer=i1_consumer, used=2), - rp_obj.Allocation( - self.ctx, resource_provider=cn1, resource_class='MEMORY_MB', - consumer=i1_consumer, used=1024), - rp_obj.Allocation( - self.ctx, resource_provider=ss, resource_class='DISK_GB', - consumer=i1_consumer, used=100), - ]) - - # OK, now before we call reshape(), here we emulate another thread - # changing the inventory for the sharing storage provider in between - # the time in the REST handler when the sharing storage provider's - # generation was validated and the actual call to reshape() - ss_threadB = rp_obj.ResourceProvider.get_by_uuid(self.ctx, ss.uuid) - # Reduce the amount of storage to 2000, from 100000. 
- new_ss_inv = rp_obj.InventoryList(self.ctx, objects=[ - rp_obj.Inventory( - self.ctx, resource_provider=ss_threadB, - resource_class='DISK_GB', total=2000, reserved=0, - max_unit=1000, min_unit=1, step_size=1, - allocation_ratio=1.0)]) - ss_threadB.set_inventory(new_ss_inv) - # Double check our storage provider's generation is now greater than - # the original storage provider record being sent to reshape() - self.assertGreater(ss_threadB.generation, ss.generation) - - # And we should legitimately get a failure now to reshape() due to - # another thread updating one of the involved provider's generations - self.assertRaises( - exception.ConcurrentUpdateDetected, - rp_obj.reshape, self.ctx, after_inventories, after_allocs) diff --git a/nova/tests/functional/api/openstack/placement/db/test_resource_class_cache.py b/nova/tests/functional/api/openstack/placement/db/test_resource_class_cache.py deleted file mode 100644 index 11b05c550b9f..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_resource_class_cache.py +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import datetime -import mock - -from oslo_utils import timeutils - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import resource_class_cache as rc_cache -from nova import rc_fields as fields -from nova.tests.functional.api.openstack.placement import base - - -class TestResourceClassCache(base.TestCase): - - def setUp(self): - super(TestResourceClassCache, self).setUp() - db = self.placement_db - self.context = mock.Mock() - sess_mock = mock.Mock() - sess_mock.connection.side_effect = db.get_engine().connect - self.context.session = sess_mock - - @mock.patch('sqlalchemy.select') - def test_rc_cache_std_no_db(self, sel_mock): - """Test that looking up either an ID or a string in the resource class - cache for a standardized resource class does not result in a DB - call. 
- """ - cache = rc_cache.ResourceClassCache(self.context) - - self.assertEqual('VCPU', cache.string_from_id(0)) - self.assertEqual('MEMORY_MB', cache.string_from_id(1)) - self.assertEqual(0, cache.id_from_string('VCPU')) - self.assertEqual(1, cache.id_from_string('MEMORY_MB')) - - self.assertFalse(sel_mock.called) - - def test_standards(self): - cache = rc_cache.ResourceClassCache(self.context) - standards = cache.STANDARDS - - self.assertEqual(len(standards), len(fields.ResourceClass.STANDARD)) - names = (rc['name'] for rc in standards) - for name in fields.ResourceClass.STANDARD: - self.assertIn(name, names) - - cache = rc_cache.ResourceClassCache(self.context) - standards2 = cache.STANDARDS - self.assertEqual(id(standards), id(standards2)) - - def test_standards_have_time_fields(self): - cache = rc_cache.ResourceClassCache(self.context) - standards = cache.STANDARDS - - first_standard = standards[0] - self.assertIn('updated_at', first_standard) - self.assertIn('created_at', first_standard) - self.assertIsNone(first_standard['updated_at']) - self.assertIsNone(first_standard['created_at']) - - def test_standard_has_time_fields(self): - cache = rc_cache.ResourceClassCache(self.context) - - vcpu_class = cache.all_from_string('VCPU') - expected = {'id': 0, 'name': 'VCPU', 'updated_at': None, - 'created_at': None} - self.assertEqual(expected, vcpu_class) - - def test_rc_cache_custom(self): - """Test that non-standard, custom resource classes hit the database and - return appropriate results, caching the results after a single - query. - """ - cache = rc_cache.ResourceClassCache(self.context) - - # Haven't added anything to the DB yet, so should raise - # ResourceClassNotFound - self.assertRaises(exception.ResourceClassNotFound, - cache.string_from_id, 1001) - self.assertRaises(exception.ResourceClassNotFound, - cache.id_from_string, "IRON_NFV") - - # Now add to the database and verify appropriate results... - with self.context.session.connection() as conn: - ins_stmt = rc_cache._RC_TBL.insert().values( - id=1001, - name='IRON_NFV' - ) - conn.execute(ins_stmt) - - self.assertEqual('IRON_NFV', cache.string_from_id(1001)) - self.assertEqual(1001, cache.id_from_string('IRON_NFV')) - - # Try same again and verify we don't hit the DB. - with mock.patch('sqlalchemy.select') as sel_mock: - self.assertEqual('IRON_NFV', cache.string_from_id(1001)) - self.assertEqual(1001, cache.id_from_string('IRON_NFV')) - self.assertFalse(sel_mock.called) - - # Verify all fields available from all_from_string - iron_nfv_class = cache.all_from_string('IRON_NFV') - self.assertEqual(1001, iron_nfv_class['id']) - self.assertEqual('IRON_NFV', iron_nfv_class['name']) - # updated_at not set on insert - self.assertIsNone(iron_nfv_class['updated_at']) - self.assertIsInstance(iron_nfv_class['created_at'], datetime.datetime) - - # Update IRON_NFV (this is a no-op but will set updated_at) - with self.context.session.connection() as conn: - # NOTE(cdent): When using explict SQL that names columns, - # the automatic timestamp handling provided by the oslo_db - # TimestampMixin is not provided. created_at is a default - # but updated_at is an onupdate. 
- upd_stmt = rc_cache._RC_TBL.update().where( - rc_cache._RC_TBL.c.id == 1001).values( - name='IRON_NFV', updated_at=timeutils.utcnow()) - conn.execute(upd_stmt) - - # reset cache - cache = rc_cache.ResourceClassCache(self.context) - - iron_nfv_class = cache.all_from_string('IRON_NFV') - # updated_at set on update - self.assertIsInstance(iron_nfv_class['updated_at'], datetime.datetime) - - def test_rc_cache_miss(self): - """Test that we raise ResourceClassNotFound if an unknown resource - class ID or string is searched for. - """ - cache = rc_cache.ResourceClassCache(self.context) - self.assertRaises(exception.ResourceClassNotFound, - cache.string_from_id, 99999999) - self.assertRaises(exception.ResourceClassNotFound, - cache.id_from_string, 'UNKNOWN') diff --git a/nova/tests/functional/api/openstack/placement/db/test_resource_provider.py b/nova/tests/functional/api/openstack/placement/db/test_resource_provider.py deleted file mode 100644 index ce2e70c803af..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_resource_provider.py +++ /dev/null @@ -1,2391 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -import functools -import mock -import os_traits -from oslo_db import exception as db_exc -from oslo_utils.fixture import uuidsentinel -import sqlalchemy as sa - -import nova -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import consumer as consumer_obj -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.db.sqlalchemy import api_models as models -from nova import rc_fields as fields -from nova.tests.functional.api.openstack.placement.db import test_base as tb - - -DISK_INVENTORY = dict( - total=200, - reserved=10, - min_unit=2, - max_unit=5, - step_size=1, - allocation_ratio=1.0, - resource_class=fields.ResourceClass.DISK_GB -) - -DISK_ALLOCATION = dict( - consumer_id=uuidsentinel.disk_consumer, - used=2, - resource_class=fields.ResourceClass.DISK_GB -) - - -class ResourceProviderTestCase(tb.PlacementDbBaseTestCase): - """Test resource-provider objects' lifecycles.""" - - def test_provider_traits_empty_param(self): - self.assertRaises(ValueError, rp_obj._get_traits_by_provider_tree, - self.ctx, []) - - def test_trait_ids_from_names_empty_param(self): - self.assertRaises(ValueError, rp_obj._trait_ids_from_names, - self.ctx, []) - - def test_create_resource_provider_requires_uuid(self): - resource_provider = rp_obj.ResourceProvider( - context = self.ctx) - self.assertRaises(exception.ObjectActionError, - resource_provider.create) - - def test_create_unknown_parent_provider(self): - """Test that if we provide a parent_provider_uuid value that points to - a resource provider that doesn't exist, that we get an - ObjectActionError. 
- """ - rp = rp_obj.ResourceProvider( - context=self.ctx, - name='rp1', - uuid=uuidsentinel.rp1, - parent_provider_uuid=uuidsentinel.noexists) - exc = self.assertRaises(exception.ObjectActionError, rp.create) - self.assertIn('parent provider UUID does not exist', str(exc)) - - def test_create_with_parent_provider_uuid_same_as_uuid_fail(self): - """Setting a parent provider UUID to one's own UUID makes no sense, so - check we don't support it. - """ - cn1 = rp_obj.ResourceProvider( - context=self.ctx, uuid=uuidsentinel.cn1, name='cn1', - parent_provider_uuid=uuidsentinel.cn1) - - exc = self.assertRaises(exception.ObjectActionError, cn1.create) - self.assertIn('parent provider UUID cannot be same as UUID', str(exc)) - - def test_create_resource_provider(self): - created_resource_provider = self._create_provider( - uuidsentinel.fake_resource_name, - uuid=uuidsentinel.fake_resource_provider, - ) - self.assertIsInstance(created_resource_provider.id, int) - - retrieved_resource_provider = rp_obj.ResourceProvider.get_by_uuid( - self.ctx, - uuidsentinel.fake_resource_provider - ) - self.assertEqual(retrieved_resource_provider.id, - created_resource_provider.id) - self.assertEqual(retrieved_resource_provider.uuid, - created_resource_provider.uuid) - self.assertEqual(retrieved_resource_provider.name, - created_resource_provider.name) - self.assertEqual(0, created_resource_provider.generation) - self.assertEqual(0, retrieved_resource_provider.generation) - self.assertIsNone(retrieved_resource_provider.parent_provider_uuid) - - def test_create_with_parent_provider_uuid(self): - self._create_provider('p1', uuid=uuidsentinel.create_p) - child = self._create_provider('c1', uuid=uuidsentinel.create_c, - parent=uuidsentinel.create_p) - self.assertEqual(uuidsentinel.create_c, child.uuid) - self.assertEqual(uuidsentinel.create_p, child.parent_provider_uuid) - self.assertEqual(uuidsentinel.create_p, child.root_provider_uuid) - - def test_root_provider_population(self): - """Simulate an old resource provider record in the database that has no - root_provider_uuid set and ensure that when grabbing the resource - provider object, the root_provider_uuid field in the table is set to - the provider's UUID. - """ - rp_tbl = rp_obj._RP_TBL - conn = self.placement_db.get_engine().connect() - - # First, set up a record for an "old-style" resource provider with no - # root provider UUID. - ins_stmt = rp_tbl.insert().values( - id=1, - uuid=uuidsentinel.rp1, - name='rp-1', - root_provider_id=None, - parent_provider_id=None, - generation=42, - ) - conn.execute(ins_stmt) - - rp = rp_obj.ResourceProvider.get_by_uuid(self.ctx, uuidsentinel.rp1) - - # The ResourceProvider._from_db_object() method should have performed - # an online data migration, populating the root_provider_id field - # with the value of the id field. Let's check it happened. - sel_stmt = sa.select([rp_tbl.c.root_provider_id]).where( - rp_tbl.c.id == 1) - res = conn.execute(sel_stmt).fetchall() - self.assertEqual(1, res[0][0]) - # Make sure the object root_provider_uuid is set on load - self.assertEqual(rp.root_provider_uuid, uuidsentinel.rp1) - - def test_inherit_root_from_parent(self): - """Tests that if we update an existing provider's parent provider UUID, - that the root provider UUID of the updated provider is automatically - set to the parent provider's root provider UUID. 
- """ - rp1 = self._create_provider('rp1') - - # Test the root was auto-set to the create provider's UUID - self.assertEqual(uuidsentinel.rp1, rp1.root_provider_uuid) - - # Create a new provider that we will make the parent of rp1 - parent_rp = self._create_provider('parent') - self.assertEqual(uuidsentinel.parent, parent_rp.root_provider_uuid) - - # Now change rp1 to be a child of parent and check rp1's root is - # changed to that of the parent. - rp1.parent_provider_uuid = parent_rp.uuid - rp1.save() - - self.assertEqual(uuidsentinel.parent, rp1.root_provider_uuid) - - def test_save_root_provider_failed(self): - """Test that if we provide a root_provider_uuid value that points to - a resource provider that doesn't exist, we get an ObjectActionError if - we save the object. - """ - self.assertRaises( - exception.ObjectActionError, - self._create_provider, 'rp1', root=uuidsentinel.noexists) - - def test_save_unknown_parent_provider(self): - """Test that if we provide a parent_provider_uuid value that points to - a resource provider that doesn't exist, that we get an - ObjectActionError if we save the object. - """ - self.assertRaises( - exception.ObjectActionError, - self._create_provider, 'rp1', parent=uuidsentinel.noexists) - - def test_save_resource_provider(self): - created_resource_provider = self._create_provider( - uuidsentinel.fake_resource_name, - uuid=uuidsentinel.fake_resource_provider, - ) - created_resource_provider.name = 'new-name' - created_resource_provider.save() - retrieved_resource_provider = rp_obj.ResourceProvider.get_by_uuid( - self.ctx, - uuidsentinel.fake_resource_provider - ) - self.assertEqual('new-name', retrieved_resource_provider.name) - - def test_save_reparenting_fail(self): - """Tests that we prevent a resource provider's parent provider UUID - from being changed from a non-NULL value to another non-NULL value. - """ - cn1 = self._create_provider('cn1') - self._create_provider('cn2') - self._create_provider('cn3') - - # First, make sure we can set the parent for a provider that does not - # have a parent currently - cn1.parent_provider_uuid = uuidsentinel.cn2 - cn1.save() - - # Now make sure we can't change the parent provider - cn1.parent_provider_uuid = uuidsentinel.cn3 - exc = self.assertRaises(exception.ObjectActionError, cn1.save) - self.assertIn('re-parenting a provider is not currently', str(exc)) - - # Also ensure that we can't "un-parent" a provider - cn1.parent_provider_uuid = None - exc = self.assertRaises(exception.ObjectActionError, cn1.save) - self.assertIn('un-parenting a provider is not currently', str(exc)) - - def test_nested_providers(self): - """Create a hierarchy of resource providers and run through a series of - tests that ensure one cannot delete a resource provider that has no - direct allocations but its child providers do have allocations. - """ - root_rp = self._create_provider('root_rp') - child_rp = self._create_provider('child_rp', - parent=uuidsentinel.root_rp) - grandchild_rp = self._create_provider('grandchild_rp', - parent=uuidsentinel.child_rp) - - # Verify that the root_provider_uuid of both the child and the - # grandchild is the UUID of the grandparent - self.assertEqual(root_rp.uuid, child_rp.root_provider_uuid) - self.assertEqual(root_rp.uuid, grandchild_rp.root_provider_uuid) - - # Create some inventory in the grandchild, allocate some consumers to - # the grandchild and then attempt to delete the root provider and child - # provider, both of which should fail. 
- tb.add_inventory(grandchild_rp, fields.ResourceClass.VCPU, 1) - - # Check all providers returned when getting by root UUID - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.root_rp, - } - ) - self.assertEqual(3, len(rps)) - - # Check all providers returned when getting by child UUID - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.child_rp, - } - ) - self.assertEqual(3, len(rps)) - - # Check all providers returned when getting by grandchild UUID - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.grandchild_rp, - } - ) - self.assertEqual(3, len(rps)) - - # Make sure that the member_of and uuid filters work with the in_tree - # filter - - # No aggregate associations yet, so expect no records when adding a - # member_of filter - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'member_of': [[uuidsentinel.agg]], - 'in_tree': uuidsentinel.grandchild_rp, - } - ) - self.assertEqual(0, len(rps)) - - # OK, associate the grandchild with an aggregate and verify that ONLY - # the grandchild is returned when asking for the grandchild's tree - # along with the aggregate as member_of - grandchild_rp.set_aggregates([uuidsentinel.agg]) - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'member_of': [[uuidsentinel.agg]], - 'in_tree': uuidsentinel.grandchild_rp, - } - ) - self.assertEqual(1, len(rps)) - self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid) - - # Try filtering on an unknown UUID and verify no results - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'uuid': uuidsentinel.unknown_rp, - 'in_tree': uuidsentinel.grandchild_rp, - } - ) - self.assertEqual(0, len(rps)) - - # And now check that filtering for just the child's UUID along with the - # tree produces just a single provider (the child) - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'uuid': uuidsentinel.child_rp, - 'in_tree': uuidsentinel.grandchild_rp, - } - ) - self.assertEqual(1, len(rps)) - self.assertEqual(uuidsentinel.child_rp, rps[0].uuid) - - # Ensure that the resources filter also continues to work properly with - # the in_tree filter. 
Request resources that none of the providers - # currently have and ensure no providers are returned - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.grandchild_rp, - 'resources': { - 'VCPU': 200, - } - } - ) - self.assertEqual(0, len(rps)) - - # And now ask for one VCPU, which should only return us the grandchild - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.grandchild_rp, - 'resources': { - 'VCPU': 1, - } - } - ) - self.assertEqual(1, len(rps)) - self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid) - - # Finally, verify we still get the grandchild if filtering on the - # parent's UUID as in_tree - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.child_rp, - 'resources': { - 'VCPU': 1, - } - } - ) - self.assertEqual(1, len(rps)) - self.assertEqual(uuidsentinel.grandchild_rp, rps[0].uuid) - - alloc_list = self.allocate_from_provider( - grandchild_rp, fields.ResourceClass.VCPU, 1) - - self.assertRaises(exception.CannotDeleteParentResourceProvider, - root_rp.destroy) - self.assertRaises(exception.CannotDeleteParentResourceProvider, - child_rp.destroy) - - # Cannot delete provider if it has allocations - self.assertRaises(exception.ResourceProviderInUse, - grandchild_rp.destroy) - - # Now remove the allocations against the child and check that we can - # now delete the child provider - alloc_list.delete_all() - grandchild_rp.destroy() - child_rp.destroy() - root_rp.destroy() - - def test_get_all_in_tree_old_records(self): - """Simulate an old resource provider record in the database that has no - root_provider_uuid set and ensure that when selecting all providers in - a tree, passing in that old resource provider, that we still get that - provider returned. - """ - # Passing a non-existing resource provider UUID should return an empty - # list - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.rp1, - } - ) - self.assertEqual([], rps.objects) - - rp_tbl = rp_obj._RP_TBL - conn = self.placement_db.get_engine().connect() - - # First, set up a record for an "old-style" resource provider with no - # root provider UUID. - ins_stmt = rp_tbl.insert().values( - id=1, - uuid=uuidsentinel.rp1, - name='rp-1', - root_provider_id=None, - parent_provider_id=None, - generation=42, - ) - conn.execute(ins_stmt) - - # NOTE(jaypipes): This is just disabling the online data migration that - # occurs in _from_db_object() that sets root provider ID to ensure we - # don't have any migrations messing with the end result. - with mock.patch('nova.api.openstack.placement.objects.' - 'resource_provider._set_root_provider_id'): - rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={ - 'in_tree': uuidsentinel.rp1, - } - ) - self.assertEqual(1, len(rps)) - - def test_has_provider_trees(self): - """The _has_provider_trees() helper method should return False unless - there is a resource provider that is a parent. - """ - self.assertFalse(rp_obj._has_provider_trees(self.ctx)) - self._create_provider('cn') - - # No parents yet. Should still be False. 
- self.assertFalse(rp_obj._has_provider_trees(self.ctx)) - - self._create_provider('numa0', parent=uuidsentinel.cn) - - # OK, now we've got a parent, so should be True - self.assertTrue(rp_obj._has_provider_trees(self.ctx)) - - def test_destroy_resource_provider(self): - created_resource_provider = self._create_provider( - uuidsentinel.fake_resource_name, - uuid=uuidsentinel.fake_resource_provider, - ) - created_resource_provider.destroy() - self.assertRaises(exception.NotFound, - rp_obj.ResourceProvider.get_by_uuid, - self.ctx, - uuidsentinel.fake_resource_provider) - self.assertRaises(exception.NotFound, - created_resource_provider.destroy) - - def test_destroy_foreign_key(self): - """This tests bug #1739571.""" - - def emulate_rp_mysql_delete(func): - def wrapped(context, _id): - rp = context.session.query( - models.ResourceProvider).\ - filter( - models.ResourceProvider.id == _id).first() - self.assertIsNone(rp.root_provider_id) - return func(context, _id) - return wrapped - - emulated = emulate_rp_mysql_delete(rp_obj._delete_rp_record) - - rp = self._create_provider(uuidsentinel.fk) - - with mock.patch.object(rp_obj, '_delete_rp_record', emulated): - rp.destroy() - - def test_destroy_allocated_resource_provider_fails(self): - rp, allocation = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION) - self.assertRaises(exception.ResourceProviderInUse, - rp.destroy) - - def test_destroy_resource_provider_destroy_inventory(self): - resource_provider = self._create_provider( - uuidsentinel.fake_resource_name, - uuid=uuidsentinel.fake_resource_provider, - ) - tb.add_inventory(resource_provider, DISK_INVENTORY['resource_class'], - DISK_INVENTORY['total']) - inventories = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, resource_provider) - self.assertEqual(1, len(inventories)) - resource_provider.destroy() - inventories = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, resource_provider) - self.assertEqual(0, len(inventories)) - - def test_destroy_with_traits(self): - """Test deleting a resource provider that has a trait successfully. - """ - rp = self._create_provider('fake_rp1', uuid=uuidsentinel.fake_rp1) - custom_trait = 'CUSTOM_TRAIT_1' - tb.set_traits(rp, custom_trait) - - trl = rp_obj.TraitList.get_all_by_resource_provider(self.ctx, rp) - self.assertEqual(1, len(trl)) - - # Delete a resource provider that has a trait association. - rp.destroy() - - # Assert the record has been deleted - # in the 'resource_provider_traits' table - # after Resource Provider object has been destroyed. - trl = rp_obj.TraitList.get_all_by_resource_provider(self.ctx, rp) - self.assertEqual(0, len(trl)) - # Assert that NotFound exception is raised. - self.assertRaises(exception.NotFound, - rp_obj.ResourceProvider.get_by_uuid, - self.ctx, uuidsentinel.fake_rp1) - - def test_set_inventory_unknown_resource_class(self): - """Test attempting to set inventory to an unknown resource class raises - an exception. - """ - rp = self._create_provider('compute-host') - self.assertRaises(exception.ResourceClassNotFound, - tb.add_inventory, rp, 'UNKNOWN', 1024, - reserved=15, - min_unit=10, - max_unit=100, - step_size=10, - allocation_ratio=1.0) - - def test_set_inventory_fail_in_use(self): - """Test attempting to set inventory which would result in removing an - inventory record for a resource class that still has allocations - against it.
- """ - rp = self._create_provider('compute-host') - tb.add_inventory(rp, 'VCPU', 12) - self.allocate_from_provider(rp, 'VCPU', 1) - - inv = rp_obj.Inventory( - resource_provider=rp, - resource_class='MEMORY_MB', - total=1024, - reserved=0, - min_unit=256, - max_unit=1024, - step_size=256, - allocation_ratio=1.0, - ) - - inv_list = rp_obj.InventoryList(objects=[inv]) - self.assertRaises(exception.InventoryInUse, - rp.set_inventory, - inv_list) - - @mock.patch('nova.api.openstack.placement.objects.resource_provider.LOG') - def test_set_inventory_over_capacity(self, mock_log): - rp = self._create_provider(uuidsentinel.rp_name) - - disk_inv = tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 2048, - reserved=15, - min_unit=10, - max_unit=600, - step_size=10) - vcpu_inv = tb.add_inventory(rp, fields.ResourceClass.VCPU, 12, - allocation_ratio=16.0) - - self.assertFalse(mock_log.warning.called) - - # Allocate something reasonable for the above inventory - self.allocate_from_provider(rp, 'DISK_GB', 500) - - # Update our inventory to over-subscribe us after the above allocation - disk_inv.total = 400 - rp.set_inventory(rp_obj.InventoryList(objects=[disk_inv, vcpu_inv])) - - # We should succeed, but have logged a warning for going over on disk - mock_log.warning.assert_called_once_with( - mock.ANY, {'uuid': rp.uuid, 'resource': 'DISK_GB'}) - - def test_provider_modify_inventory(self): - rp = self._create_provider(uuidsentinel.rp_name) - saved_generation = rp.generation - - disk_inv = tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 1024, - reserved=15, - min_unit=10, - max_unit=100, - step_size=10) - - vcpu_inv = tb.add_inventory(rp, fields.ResourceClass.VCPU, 12, - allocation_ratio=16.0) - - # generation has bumped once for each add - self.assertEqual(saved_generation + 2, rp.generation) - saved_generation = rp.generation - - new_inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - self.assertEqual(2, len(new_inv_list)) - resource_classes = [inv.resource_class for inv in new_inv_list] - self.assertIn(fields.ResourceClass.VCPU, resource_classes) - self.assertIn(fields.ResourceClass.DISK_GB, resource_classes) - - # reset list to just disk_inv - inv_list = rp_obj.InventoryList(objects=[disk_inv]) - rp.set_inventory(inv_list) - - # generation has bumped - self.assertEqual(saved_generation + 1, rp.generation) - saved_generation = rp.generation - - new_inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - self.assertEqual(1, len(new_inv_list)) - resource_classes = [inv.resource_class for inv in new_inv_list] - self.assertNotIn(fields.ResourceClass.VCPU, resource_classes) - self.assertIn(fields.ResourceClass.DISK_GB, resource_classes) - self.assertEqual(1024, new_inv_list[0].total) - - # update existing disk inv to new settings - disk_inv = rp_obj.Inventory( - resource_provider=rp, - resource_class=fields.ResourceClass.DISK_GB, - total=2048, - reserved=15, - min_unit=10, - max_unit=100, - step_size=10, - allocation_ratio=1.0) - rp.update_inventory(disk_inv) - - # generation has bumped - self.assertEqual(saved_generation + 1, rp.generation) - saved_generation = rp.generation - - new_inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - self.assertEqual(1, len(new_inv_list)) - self.assertEqual(2048, new_inv_list[0].total) - - # delete inventory - rp.delete_inventory(fields.ResourceClass.DISK_GB) - - # generation has bumped - self.assertEqual(saved_generation + 1, rp.generation) - saved_generation = rp.generation - - 
new_inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - result = new_inv_list.find(fields.ResourceClass.DISK_GB) - self.assertIsNone(result) - self.assertRaises(exception.NotFound, rp.delete_inventory, - fields.ResourceClass.DISK_GB) - - # check inventory list is empty - inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - self.assertEqual(0, len(inv_list)) - - # add some inventory - rp.add_inventory(vcpu_inv) - inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - self.assertEqual(1, len(inv_list)) - - # generation has bumped - self.assertEqual(saved_generation + 1, rp.generation) - saved_generation = rp.generation - - # add same inventory again - self.assertRaises(db_exc.DBDuplicateEntry, - rp.add_inventory, vcpu_inv) - - # generation has not bumped - self.assertEqual(saved_generation, rp.generation) - - # fail when generation wrong - rp.generation = rp.generation - 1 - self.assertRaises(exception.ConcurrentUpdateDetected, - rp.set_inventory, inv_list) - - def test_delete_inventory_not_found(self): - rp = self._create_provider(uuidsentinel.rp_name) - error = self.assertRaises(exception.NotFound, rp.delete_inventory, - 'DISK_GB') - self.assertIn('No inventory of class DISK_GB found for delete', - str(error)) - - def test_delete_inventory_with_allocation(self): - rp, allocation = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION) - error = self.assertRaises(exception.InventoryInUse, - rp.delete_inventory, - 'DISK_GB') - self.assertIn( - "Inventory for 'DISK_GB' on resource provider '%s' in use" - % rp.uuid, str(error)) - - def test_update_inventory_not_found(self): - rp = self._create_provider(uuidsentinel.rp_name) - disk_inv = rp_obj.Inventory(resource_provider=rp, - resource_class='DISK_GB', - total=2048) - disk_inv.obj_set_defaults() - error = self.assertRaises(exception.NotFound, rp.update_inventory, - disk_inv) - self.assertIn('No inventory of class DISK_GB found', - str(error)) - - @mock.patch('nova.api.openstack.placement.objects.resource_provider.LOG') - def test_update_inventory_violates_allocation(self, mock_log): - # Compute nodes that are reconfigured have to be able to set - # their inventory to something that violates allocations so - # we need to make that possible. 
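-        # NOTE: rather than raising, update_inventory() is expected to log
-        # a warning and save the new, lower total anyway; both outcomes
-        # are asserted below.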
- rp, allocation = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION) - # attempt to set inventory to less than currently allocated - # amounts - new_total = 1 - disk_inv = rp_obj.Inventory( - resource_provider=rp, - resource_class=fields.ResourceClass.DISK_GB, total=new_total) - disk_inv.obj_set_defaults() - rp.update_inventory(disk_inv) - - usages = rp_obj.UsageList.get_all_by_resource_provider_uuid( - self.ctx, rp.uuid) - self.assertEqual(allocation.used, usages[0].usage) - - inv_list = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp) - self.assertEqual(new_total, inv_list[0].total) - mock_log.warning.assert_called_once_with( - mock.ANY, {'uuid': rp.uuid, 'resource': 'DISK_GB'}) - - def test_add_allocation_increments_generation(self): - rp = self._create_provider(name='foo') - tb.add_inventory(rp, DISK_INVENTORY['resource_class'], - DISK_INVENTORY['total']) - expected_gen = rp.generation + 1 - self.allocate_from_provider(rp, DISK_ALLOCATION['resource_class'], - DISK_ALLOCATION['used']) - self.assertEqual(expected_gen, rp.generation) - - def test_get_all_by_resource_provider_multiple_providers(self): - rp1 = self._create_provider('cn1') - rp2 = self._create_provider(name='cn2') - - for rp in (rp1, rp2): - tb.add_inventory(rp, DISK_INVENTORY['resource_class'], - DISK_INVENTORY['total']) - tb.add_inventory(rp, fields.ResourceClass.IPV4_ADDRESS, 10, - max_unit=2) - - # Get inventories for the first resource provider and validate - # the inventory records have a matching resource provider - got_inv = rp_obj.InventoryList.get_all_by_resource_provider( - self.ctx, rp1) - for inv in got_inv: - self.assertEqual(rp1.id, inv.resource_provider.id) - - -class ResourceProviderListTestCase(tb.PlacementDbBaseTestCase): - def test_get_all_by_filters(self): - for rp_i in ['1', '2']: - self._create_provider( - 'rp_name_' + rp_i, - uuid=getattr(uuidsentinel, 'rp_uuid_' + rp_i)) - - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx) - self.assertEqual(2, len(resource_providers)) - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, filters={'name': u'rp_name_1'}) - self.assertEqual(1, len(resource_providers)) - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, filters={'uuid': uuidsentinel.rp_uuid_2}) - self.assertEqual(1, len(resource_providers)) - self.assertEqual('rp_name_2', resource_providers[0].name) - - def test_get_all_by_filters_with_resources(self): - for rp_i in ['1', '2']: - rp = self._create_provider('rp_name_' + rp_i) - tb.add_inventory(rp, fields.ResourceClass.VCPU, 2) - tb.add_inventory(rp, fields.ResourceClass.DISK_GB, 1024, - reserved=2) - # Write a specific inventory for testing min/max units and steps - tb.add_inventory(rp, fields.ResourceClass.MEMORY_MB, 1024, - reserved=2, min_unit=2, max_unit=4, step_size=2) - - # Create the VCPU allocation only for the first RP - if rp_i != '1': - continue - self.allocate_from_provider(rp, fields.ResourceClass.VCPU, used=1) - - # Both RPs should accept that request given the only current allocation - # for the first RP is leaving one VCPU - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': {fields.ResourceClass.VCPU: 1}}) - self.assertEqual(2, len(resource_providers)) - # Now, when asking for 2 VCPUs, only the second RP should accept that - # given the current allocation for the first RP - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, {'resources': 
{fields.ResourceClass.VCPU: 2}})
-        self.assertEqual(1, len(resource_providers))
-        # Adding a second resource request should be okay for the 2nd RP
-        # since it has enough disk, but we also need to make sure that the
-        # first RP is not acceptable because of the VCPU request
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'resources': {fields.ResourceClass.VCPU: 2,
-                                     fields.ResourceClass.DISK_GB: 1022}})
-        self.assertEqual(1, len(resource_providers))
-        # Now, we are asking for disk and VCPU amounts that no RP can
-        # accept (the 2nd RP has part of its disk reserved)
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'resources': {fields.ResourceClass.VCPU: 2,
-                                     fields.ResourceClass.DISK_GB: 1024}})
-        self.assertEqual(0, len(resource_providers))
-
-        # We also want to verify that asking for a specific RP by name
-        # still checks the resource usage.
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'name': u'rp_name_1',
-                       'resources': {fields.ResourceClass.VCPU: 1}})
-        self.assertEqual(1, len(resource_providers))
-
-        # Let's verify that the min and max units are checked too
-        # Case 1: amount is between min and max and a multiple of step_size
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 2}})
-        self.assertEqual(2, len(resource_providers))
-        # Case 2: amount is less than min_unit
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 1}})
-        self.assertEqual(0, len(resource_providers))
-        # Case 3: amount is more than max_unit
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 5}})
-        self.assertEqual(0, len(resource_providers))
-        # Case 4: amount is not a multiple of step_size
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, {'resources': {fields.ResourceClass.MEMORY_MB: 3}})
-        self.assertEqual(0, len(resource_providers))
-
-    def test_get_all_by_filters_with_resources_not_existing(self):
-        self.assertRaises(
-            exception.ResourceClassNotFound,
-            rp_obj.ResourceProviderList.get_all_by_filters,
-            self.ctx, {'resources': {'FOOBAR': 3}})
-
-    def test_get_all_by_filters_aggregate(self):
-        for rp_i in [1, 2, 3, 4]:
-            aggs = [uuidsentinel.agg_a, uuidsentinel.agg_b] if rp_i % 2 else []
-            self._create_provider(
-                'rp_name_' + str(rp_i), *aggs,
-                uuid=getattr(uuidsentinel, 'rp_uuid_' + str(rp_i)))
-
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, filters={'member_of': [[uuidsentinel.agg_a]]})
-
-        self.assertEqual(2, len(resource_providers))
-        names = [_rp.name for _rp in resource_providers]
-        self.assertIn('rp_name_1', names)
-        self.assertIn('rp_name_3', names)
-        self.assertNotIn('rp_name_2', names)
-        self.assertNotIn('rp_name_4', names)
-
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, filters={'member_of':
-                               [[uuidsentinel.agg_a, uuidsentinel.agg_b]]})
-        self.assertEqual(2, len(resource_providers))
-
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, filters={'member_of':
-                               [[uuidsentinel.agg_a, uuidsentinel.agg_b]],
-                               'name': u'rp_name_1'})
-        self.assertEqual(1, len(resource_providers))
-
-        resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
-            self.ctx, filters={'member_of':
-                               [[uuidsentinel.agg_a,
uuidsentinel.agg_b]], - 'name': u'barnabas'}) - self.assertEqual(0, len(resource_providers)) - - resource_providers = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, filters={'member_of': - [[uuidsentinel.agg_1, uuidsentinel.agg_2]]}) - self.assertEqual(0, len(resource_providers)) - - def test_get_all_by_required(self): - # Create some resource providers and give them each 0 or more traits. - # rp_name_0: no traits - # rp_name_1: CUSTOM_TRAIT_A - # rp_name_2: CUSTOM_TRAIT_A, CUSTOM_TRAIT_B - # rp_name_3: CUSTOM_TRAIT_A, CUSTOM_TRAIT_B, CUSTOM_TRAIT_C - trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', - 'CUSTOM_TRAIT_C'] - for rp_i in [0, 1, 2, 3]: - rp = self._create_provider( - 'rp_name_' + str(rp_i), - uuid=getattr(uuidsentinel, 'rp_uuid_' + str(rp_i))) - if rp_i: - traits = trait_names[0:rp_i] - tb.set_traits(rp, *traits) - - # Three rps (1, 2, 3) should have CUSTOM_TRAIT_A - custom_a_rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, filters={'required': ['CUSTOM_TRAIT_A']}) - self.assertEqual(3, len(custom_a_rps)) - rp_names = [a_rp.name for a_rp in custom_a_rps] - expected_names = ['rp_name_%s' % i for i in [1, 2, 3]] - self.assertEqual(expected_names, sorted(rp_names)) - - # One rp (rp 1) if we forbid CUSTOM_TRAIT_B, with a single trait of - # CUSTOM_TRAIT_A - custom_a_rps = rp_obj.ResourceProviderList.get_all_by_filters( - self.ctx, - filters={'required': ['CUSTOM_TRAIT_A', '!CUSTOM_TRAIT_B']}) - self.assertEqual(1, len(custom_a_rps)) - self.assertEqual(uuidsentinel.rp_uuid_1, custom_a_rps[0].uuid) - self.assertEqual('rp_name_1', custom_a_rps[0].name) - traits = rp_obj.TraitList.get_all_by_resource_provider( - self.ctx, custom_a_rps[0]) - self.assertEqual(1, len(traits)) - self.assertEqual('CUSTOM_TRAIT_A', traits[0].name) - - -class TestResourceProviderAggregates(tb.PlacementDbBaseTestCase): - def test_set_and_get_new_aggregates(self): - aggregate_uuids = [uuidsentinel.agg_a, uuidsentinel.agg_b] - rp = self._create_provider( - uuidsentinel.rp_name, - *aggregate_uuids, - uuid=uuidsentinel.rp_uuid - ) - - read_aggregate_uuids = rp.get_aggregates() - self.assertItemsEqual(aggregate_uuids, read_aggregate_uuids) - - # Since get_aggregates always does a new query this is - # mostly nonsense but is here for completeness. - read_rp = rp_obj.ResourceProvider.get_by_uuid( - self.ctx, uuidsentinel.rp_uuid) - re_read_aggregate_uuids = read_rp.get_aggregates() - self.assertItemsEqual(aggregate_uuids, re_read_aggregate_uuids) - - def test_set_aggregates_is_replace(self): - start_aggregate_uuids = [uuidsentinel.agg_a, uuidsentinel.agg_b] - rp = self._create_provider( - uuidsentinel.rp_name, - *start_aggregate_uuids, - uuid=uuidsentinel.rp_uuid - ) - - read_aggregate_uuids = rp.get_aggregates() - self.assertItemsEqual(start_aggregate_uuids, read_aggregate_uuids) - - rp.set_aggregates([uuidsentinel.agg_a]) - read_aggregate_uuids = rp.get_aggregates() - self.assertNotIn(uuidsentinel.agg_b, read_aggregate_uuids) - self.assertIn(uuidsentinel.agg_a, read_aggregate_uuids) - - # Empty list means delete. 
- rp.set_aggregates([]) - read_aggregate_uuids = rp.get_aggregates() - self.assertEqual([], read_aggregate_uuids) - - def test_delete_rp_clears_aggs(self): - start_aggregate_uuids = [uuidsentinel.agg_a, uuidsentinel.agg_b] - rp = self._create_provider( - uuidsentinel.rp_name, - *start_aggregate_uuids, - uuid=uuidsentinel.rp_uuid - ) - aggs = rp.get_aggregates() - self.assertEqual(2, len(aggs)) - rp.destroy() - aggs = rp.get_aggregates() - self.assertEqual(0, len(aggs)) - - def test_anchors_for_sharing_providers(self): - """Test _anchors_for_sharing_providers with the following setup. - - .............agg2..... - : : - : +====+ : +====+ ..agg5.. - : | r1 | .| r2 | : +----+ : - : +=+==+ +=+==+ +----+ : | s3 | : - : | | | s2 | : +----+ : - : +=+==+ agg1 +=+==+ +----+ ........ - : | c1 |..... | c2 | : - : +====+ : : +====+ agg4 +----+ - : : : : : | s4 | - : +----+ +----+ : +====+ +----+ - :....| s5 | | s1 |.......agg3......| r3 | - : +----+ +----+ +====+ - :.........agg2...: - """ - agg1 = uuidsentinel.agg1 - agg2 = uuidsentinel.agg2 - agg3 = uuidsentinel.agg3 - agg4 = uuidsentinel.agg4 - agg5 = uuidsentinel.agg5 - shr_trait = rp_obj.Trait.get_by_name( - self.ctx, "MISC_SHARES_VIA_AGGREGATE") - - def mkrp(name, sharing, aggs, **kwargs): - rp = self._create_provider(name, *aggs, **kwargs) - if sharing: - rp.set_traits(rp_obj.TraitList(objects=[shr_trait])) - rp.set_aggregates(aggs) - return rp - - # r1 and c1 constitute a tree. The child is in agg1. We use this to - # show that, when we ask for anchors for s1 (a member of agg1), we get - # the *root* of the tree, not the aggregate member itself (c1). - r1 = mkrp('r1', False, []) - mkrp('c1', False, [agg1], parent=r1.uuid) - # r2 and c2 constitute a tree. The root is in agg2; the child is in - # agg3. We use this to show that, when we ask for anchors for a - # provider that's in both of those aggregates (s1), we only get r2 once - r2 = mkrp('r2', False, [agg2]) - mkrp('c2', False, [agg3], parent=r2.uuid) - # r3 stands alone, but is a member of two aggregates. We use this to - # show that we don't "jump aggregates" - when we ask for anchors for s2 - # we only get r3 (and s2 itself). - r3 = mkrp('r3', False, [agg3, agg4]) - # s* are sharing providers - s1 = mkrp('s1', True, [agg1, agg2, agg3]) - s2 = mkrp('s2', True, [agg4]) - # s3 is the only member of agg5. We use this to show that the provider - # is still considered its own root, even if the aggregate is only - # associated with itself. - s3 = mkrp('s3', True, [agg5]) - # s4 is a broken semi-sharing provider - has MISC_SHARES_VIA_AGGREGATE, - # but is not a member of an aggregate. It has no "anchor". - s4 = mkrp('s4', True, []) - # s5 is a sharing provider whose aggregates overlap with those of s1. - # s5 and s1 will show up as "anchors" for each other. 
-        s5 = mkrp('s5', True, [agg1, agg2])
-
-        # s1 gets s1 (self),
-        # r1 via agg1 through c1,
-        # r2 via agg2 AND via agg3 through c2
-        # r3 via agg3
-        # s5 via agg1 and agg2
-        expected = set([(s1.uuid, rp.uuid) for rp in (s1, r1, r2, r3, s5)])
-        self.assertItemsEqual(
-            expected, rp_obj._anchors_for_sharing_providers(self.ctx, [s1.id]))
-
-        # Get same result (id format) when we set get_id=True
-        expected = set([(s1.id, rp.id) for rp in (s1, r1, r2, r3, s5)])
-        self.assertItemsEqual(
-            expected, rp_obj._anchors_for_sharing_providers(self.ctx, [s1.id],
-                                                            get_id=True))
-
-        # s2 gets s2 (self) and r3 via agg4
-        expected = set([(s2.uuid, rp.uuid) for rp in (s2, r3)])
-        self.assertItemsEqual(
-            expected, rp_obj._anchors_for_sharing_providers(self.ctx, [s2.id]))
-
-        # s3 gets self
-        self.assertEqual(
-            set([(s3.uuid, s3.uuid)]), rp_obj._anchors_for_sharing_providers(
-                self.ctx, [s3.id]))
-
-        # s4 isn't really a sharing provider - gets nothing
-        self.assertEqual(
-            set([]), rp_obj._anchors_for_sharing_providers(self.ctx, [s4.id]))
-
-        # s5 gets s5 (self),
-        # r1 via agg1 through c1,
-        # r2 via agg2
-        # s1 via agg1 and agg2
-        expected = set([(s5.uuid, rp.uuid) for rp in (s5, r1, r2, s1)])
-        self.assertItemsEqual(
-            expected, rp_obj._anchors_for_sharing_providers(self.ctx, [s5.id]))
-
-        # validate that we can get them all at once
-        expected = set(
-            [(s1.id, rp.id) for rp in (r1, r2, r3, s1, s5)] +
-            [(s2.id, rp.id) for rp in (r3, s2)] +
-            [(s3.id, rp.id) for rp in (s3,)] +
-            [(s5.id, rp.id) for rp in (r1, r2, s1, s5)]
-        )
-        self.assertItemsEqual(
-            expected, rp_obj._anchors_for_sharing_providers(self.ctx,
-                [s1.id, s2.id, s3.id, s4.id, s5.id], get_id=True))
-
-
-class TestAllocation(tb.PlacementDbBaseTestCase):
-
-    def test_create_list_and_delete_allocation(self):
-        rp, _ = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION)
-
-        allocations = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, rp)
-
-        self.assertEqual(1, len(allocations))
-
-        self.assertEqual(DISK_ALLOCATION['used'],
-                         allocations[0].used)
-
-        allocations.delete_all()
-
-        allocations = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, rp)
-
-        self.assertEqual(0, len(allocations))
-
-    def test_delete_all_with_multiple_consumers(self):
-        """Tests the fix for LP #1781430, where calling
-        AllocationList.delete_all() on an AllocationList returned by
-        AllocationList.get_all_by_resource_provider() for a resource
-        provider with multiple consumers allocated against it left the
-        DB in an inconsistent state.
-        """
-        # Create a single resource provider and allocate resources for two
-        # instances from it. Then grab all the provider's allocations with
-        # AllocationList.get_all_by_resource_provider() and attempt to delete
-        # them all with AllocationList.delete_all(). After which, another call
-        # to AllocationList.get_all_by_resource_provider() should return an
-        # empty list.
-        cn1 = self._create_provider('cn1')
-        tb.add_inventory(cn1, 'VCPU', 8)
-
-        c1_uuid = uuidsentinel.consumer1
-        c2_uuid = uuidsentinel.consumer2
-
-        for c_uuid in (c1_uuid, c2_uuid):
-            self.allocate_from_provider(cn1, 'VCPU', 1, consumer_id=c_uuid)
-
-        allocs = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, cn1)
-        self.assertEqual(2, len(allocs))
-
-        allocs.delete_all()
-
-        allocs = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, cn1)
-        self.assertEqual(0, len(allocs))
-
-    def test_multi_provider_allocation(self):
-        """Tests that an allocation that includes more than one resource
-        provider can be created, listed and deleted properly.
-
-        Bug #1707669 highlighted a situation that arose when attempting to
-        remove part of an allocation for a source host during a resize
-        operation where the existing allocation was not being properly
-        deleted.
-        """
-        cn_source = self._create_provider('cn_source')
-        cn_dest = self._create_provider('cn_dest')
-
-        # Add same inventory to both source and destination host
-        for cn in (cn_source, cn_dest):
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 1024,
-                             min_unit=64,
-                             max_unit=1024,
-                             step_size=64,
-                             allocation_ratio=1.5)
-
-        # Create a consumer representing the instance
-        inst_consumer = consumer_obj.Consumer(
-            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
-            project=self.project_obj)
-        inst_consumer.create()
-
-        # Now create an allocation that represents a move operation where the
-        # scheduler has selected cn_dest as the target host and created a
-        # "doubled-up" allocation for the duration of the move operation
-        alloc_list = rp_obj.AllocationList(context=self.ctx,
-            objects=[
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=cn_source,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=1),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=cn_source,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=256),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=cn_dest,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=1),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=cn_dest,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=256),
-            ])
-        alloc_list.replace_all()
-
-        src_allocs = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, cn_source)
-
-        self.assertEqual(2, len(src_allocs))
-
-        dest_allocs = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, cn_dest)
-
-        self.assertEqual(2, len(dest_allocs))
-
-        consumer_allocs = rp_obj.AllocationList.get_all_by_consumer_id(
-            self.ctx, uuidsentinel.instance)
-
-        self.assertEqual(4, len(consumer_allocs))
-
-        # Validate that when we create an allocation for a consumer, we
-        # delete any existing allocation and replace it with the new one.
-        # Here, we're emulating the step that occurs on confirm_resize() where
-        # the source host pulls the existing allocation for the instance and
-        # removes any resources that refer to itself and saves the allocation
-        # back to placement
-        new_alloc_list = rp_obj.AllocationList(context=self.ctx,
-            objects=[
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=cn_dest,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=1),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=cn_dest,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=256),
-            ])
-        new_alloc_list.replace_all()
-
-        src_allocs = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, cn_source)
-
-        self.assertEqual(0, len(src_allocs))
-
-        dest_allocs = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, cn_dest)
-
-        self.assertEqual(2, len(dest_allocs))
-
-        consumer_allocs = rp_obj.AllocationList.get_all_by_consumer_id(
-            self.ctx, uuidsentinel.instance)
-
-        self.assertEqual(2, len(consumer_allocs))
-
-    def test_get_all_by_resource_provider(self):
-        rp, allocation = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION)
-        allocations = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, rp)
-        self.assertEqual(1, len(allocations))
-        self.assertEqual(rp.id, allocations[0].resource_provider.id)
-        self.assertEqual(allocation.resource_provider.id,
-                         allocations[0].resource_provider.id)
-
-
-class TestAllocationListCreateDelete(tb.PlacementDbBaseTestCase):
-
-    def test_allocation_checking(self):
-        """Test that allocation check logic works with 2 resource classes on
-        one provider.
-
-        If this fails, we get a KeyError at replace_all()
-        """
-
-        max_unit = 10
-        consumer_uuid = uuidsentinel.consumer
-        consumer_uuid2 = uuidsentinel.consumer2
-
-        # Create consumers representing the two instances
-        consumer = consumer_obj.Consumer(
-            self.ctx, uuid=consumer_uuid, user=self.user_obj,
-            project=self.project_obj)
-        consumer.create()
-        consumer2 = consumer_obj.Consumer(
-            self.ctx, uuid=consumer_uuid2, user=self.user_obj,
-            project=self.project_obj)
-        consumer2.create()
-
-        # Create one resource provider with 2 classes
-        rp1_name = uuidsentinel.rp1_name
-        rp1_uuid = uuidsentinel.rp1_uuid
-        rp1_class = fields.ResourceClass.DISK_GB
-        rp1_used = 6
-
-        rp2_class = fields.ResourceClass.IPV4_ADDRESS
-        rp2_used = 2
-
-        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
-        tb.add_inventory(rp1, rp1_class, 1024, max_unit=max_unit)
-        tb.add_inventory(rp1, rp2_class, 255, reserved=2, max_unit=max_unit)
-
-        # create the allocations for a first consumer
-        allocation_1 = rp_obj.Allocation(resource_provider=rp1,
-                                         consumer=consumer,
-                                         resource_class=rp1_class,
-                                         used=rp1_used)
-        allocation_2 = rp_obj.Allocation(resource_provider=rp1,
-                                         consumer=consumer,
-                                         resource_class=rp2_class,
-                                         used=rp2_used)
-        allocation_list = rp_obj.AllocationList(
-            self.ctx, objects=[allocation_1, allocation_2])
-        allocation_list.replace_all()
-
-        # Create the allocations for a second consumer; until allocations
-        # exist for more than one consumer in the db we aren't actually
-        # doing real allocation math, which is what triggers the sql
-        # monster.
-        allocation_1 = rp_obj.Allocation(resource_provider=rp1,
-                                         consumer=consumer2,
-                                         resource_class=rp1_class,
-                                         used=rp1_used)
-        allocation_2 = rp_obj.Allocation(resource_provider=rp1,
-                                         consumer=consumer2,
-                                         resource_class=rp2_class,
-                                         used=rp2_used)
-        allocation_list = rp_obj.AllocationList(
-            self.ctx, objects=[allocation_1, allocation_2])
-        # If we are joining wrong, this will be a KeyError
-        allocation_list.replace_all()
-
-    def test_allocation_list_create(self):
-        max_unit = 10
-        consumer_uuid = uuidsentinel.consumer
-
-        # Create a consumer representing the instance
-        inst_consumer = consumer_obj.Consumer(
-            self.ctx, uuid=consumer_uuid, user=self.user_obj,
-            project=self.project_obj)
-        inst_consumer.create()
-
-        # Create two resource providers
-        rp1_name = uuidsentinel.rp1_name
-        rp1_uuid = uuidsentinel.rp1_uuid
-        rp1_class = fields.ResourceClass.DISK_GB
-        rp1_used = 6
-
-        rp2_name = uuidsentinel.rp2_name
-        rp2_uuid = uuidsentinel.rp2_uuid
-        rp2_class = fields.ResourceClass.IPV4_ADDRESS
-        rp2_used = 2
-
-        rp1 = self._create_provider(rp1_name, uuid=rp1_uuid)
-        rp2 = self._create_provider(rp2_name, uuid=rp2_uuid)
-
-        # Two allocations, one for each resource provider.
-        allocation_1 = rp_obj.Allocation(resource_provider=rp1,
-                                         consumer=inst_consumer,
-                                         resource_class=rp1_class,
-                                         used=rp1_used)
-        allocation_2 = rp_obj.Allocation(resource_provider=rp2,
-                                         consumer=inst_consumer,
-                                         resource_class=rp2_class,
-                                         used=rp2_used)
-        allocation_list = rp_obj.AllocationList(
-            self.ctx, objects=[allocation_1, allocation_2])
-
-        # There's no inventory, we have a failure.
-        error = self.assertRaises(exception.InvalidInventory,
-                                  allocation_list.replace_all)
-        # Confirm that the resource class string, not index, is in
-        # the exception and resource providers are listed by uuid.
-        self.assertIn(rp1_class, str(error))
-        self.assertIn(rp2_class, str(error))
-        self.assertIn(rp1.uuid, str(error))
-        self.assertIn(rp2.uuid, str(error))
-
-        # Add inventory for one of the two resource providers. This should
-        # also fail, since rp2 has no inventory.
-        tb.add_inventory(rp1, rp1_class, 1024, max_unit=1)
-        self.assertRaises(exception.InvalidInventory,
-                          allocation_list.replace_all)
-
-        # Add inventory for the second resource provider
-        tb.add_inventory(rp2, rp2_class, 255, reserved=2, max_unit=1)
-
-        # Now the allocations will still fail because max_unit is 1
-        self.assertRaises(exception.InvalidAllocationConstraintsViolated,
-                          allocation_list.replace_all)
-        inv1 = rp_obj.Inventory(resource_provider=rp1,
-                                resource_class=rp1_class,
-                                total=1024, max_unit=max_unit)
-        inv1.obj_set_defaults()
-        rp1.set_inventory(rp_obj.InventoryList(objects=[inv1]))
-        inv2 = rp_obj.Inventory(resource_provider=rp2,
-                                resource_class=rp2_class,
-                                total=255, reserved=2, max_unit=max_unit)
-        inv2.obj_set_defaults()
-        rp2.set_inventory(rp_obj.InventoryList(objects=[inv2]))
-
-        # Now we can finally allocate.
-        allocation_list.replace_all()
-
-        # Check that those allocations changed usage on each
-        # resource provider.
-        rp1_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid(
-            self.ctx, rp1_uuid)
-        rp2_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid(
-            self.ctx, rp2_uuid)
-        self.assertEqual(rp1_used, rp1_usage[0].usage)
-        self.assertEqual(rp2_used, rp2_usage[0].usage)
-
-        # redo one allocation
-        # TODO(cdent): This does not currently behave as expected
-        # because a new allocation is created, adding to the total
-        # used, not replacing.
- rp1_used += 1 - self.allocate_from_provider(rp1, rp1_class, rp1_used, - consumer=inst_consumer) - - rp1_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid( - self.ctx, rp1_uuid) - self.assertEqual(rp1_used, rp1_usage[0].usage) - - # delete the allocations for the consumer - # NOTE(cdent): The database uses 'consumer_id' for the - # column, presumably because some ids might not be uuids, at - # some point in the future. - consumer_allocations = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, consumer_uuid) - consumer_allocations.delete_all() - - rp1_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid( - self.ctx, rp1_uuid) - rp2_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid( - self.ctx, rp2_uuid) - self.assertEqual(0, rp1_usage[0].usage) - self.assertEqual(0, rp2_usage[0].usage) - - def _make_rp_and_inventory(self, **kwargs): - # Create one resource provider and set some inventory - rp_name = kwargs.get('rp_name') or uuidsentinel.rp_name - rp_uuid = kwargs.get('rp_uuid') or uuidsentinel.rp_uuid - rp = self._create_provider(rp_name, uuid=rp_uuid) - rc = kwargs.pop('resource_class') - tb.add_inventory(rp, rc, 1024, **kwargs) - return rp - - def _validate_usage(self, rp, usage): - rp_usage = rp_obj.UsageList.get_all_by_resource_provider_uuid( - self.ctx, rp.uuid) - self.assertEqual(usage, rp_usage[0].usage) - - def _check_create_allocations(self, inventory_kwargs, - bad_used, good_used): - rp_class = fields.ResourceClass.DISK_GB - rp = self._make_rp_and_inventory(resource_class=rp_class, - **inventory_kwargs) - - # allocation, bad step_size - self.assertRaises(exception.InvalidAllocationConstraintsViolated, - self.allocate_from_provider, rp, rp_class, bad_used) - - # correct for step size - self.allocate_from_provider(rp, rp_class, good_used) - - # check usage - self._validate_usage(rp, good_used) - - def test_create_all_step_size(self): - bad_used = 4 - good_used = 5 - inventory_kwargs = {'max_unit': 9999, 'step_size': 5} - - self._check_create_allocations(inventory_kwargs, - bad_used, good_used) - - def test_create_all_min_unit(self): - bad_used = 4 - good_used = 5 - inventory_kwargs = {'max_unit': 9999, 'min_unit': 5} - - self._check_create_allocations(inventory_kwargs, - bad_used, good_used) - - def test_create_all_max_unit(self): - bad_used = 5 - good_used = 3 - inventory_kwargs = {'max_unit': 3} - - self._check_create_allocations(inventory_kwargs, - bad_used, good_used) - - def test_create_and_clear(self): - """Test that a used of 0 in an allocation wipes allocations.""" - consumer_uuid = uuidsentinel.consumer - - # Create a consumer representing the instance - inst_consumer = consumer_obj.Consumer( - self.ctx, uuid=consumer_uuid, user=self.user_obj, - project=self.project_obj) - inst_consumer.create() - - rp_class = fields.ResourceClass.DISK_GB - target_rp = self._make_rp_and_inventory(resource_class=rp_class, - max_unit=500) - - # Create two allocations with values and confirm the resulting - # usage is as expected. 
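-        # NOTE: both allocations below are for the same consumer and the
-        # same resource class, so the usage reported for the provider is
-        # their sum (100 + 200 = 300), as asserted afterwards.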
- allocation1 = rp_obj.Allocation(resource_provider=target_rp, - consumer=inst_consumer, - resource_class=rp_class, - used=100) - allocation2 = rp_obj.Allocation(resource_provider=target_rp, - consumer=inst_consumer, - resource_class=rp_class, - used=200) - allocation_list = rp_obj.AllocationList( - self.ctx, - objects=[allocation1, allocation2], - ) - allocation_list.replace_all() - - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, consumer_uuid) - self.assertEqual(2, len(allocations)) - usage = sum(alloc.used for alloc in allocations) - self.assertEqual(300, usage) - - # Create two allocations, one with 0 used, to confirm the - # resulting usage is only of one. - allocation1 = rp_obj.Allocation(resource_provider=target_rp, - consumer=inst_consumer, - resource_class=rp_class, - used=0) - allocation2 = rp_obj.Allocation(resource_provider=target_rp, - consumer=inst_consumer, - resource_class=rp_class, - used=200) - allocation_list = rp_obj.AllocationList( - self.ctx, - objects=[allocation1, allocation2], - ) - allocation_list.replace_all() - - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, consumer_uuid) - self.assertEqual(1, len(allocations)) - usage = allocations[0].used - self.assertEqual(200, usage) - - # add a source rp and a migration consumer - migration_uuid = uuidsentinel.migration - - # Create a consumer representing the migration - mig_consumer = consumer_obj.Consumer( - self.ctx, uuid=migration_uuid, user=self.user_obj, - project=self.project_obj) - mig_consumer.create() - - source_rp = self._make_rp_and_inventory( - rp_name=uuidsentinel.source_name, rp_uuid=uuidsentinel.source_uuid, - resource_class=rp_class, max_unit=500) - - # Create two allocations, one as the consumer, one as the - # migration. - allocation1 = rp_obj.Allocation(resource_provider=target_rp, - consumer=inst_consumer, - resource_class=rp_class, - used=200) - allocation2 = rp_obj.Allocation(resource_provider=source_rp, - consumer=mig_consumer, - resource_class=rp_class, - used=200) - allocation_list = rp_obj.AllocationList( - self.ctx, - objects=[allocation1, allocation2], - ) - allocation_list.replace_all() - - # Check primary consumer allocations. - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, consumer_uuid) - self.assertEqual(1, len(allocations)) - usage = allocations[0].used - self.assertEqual(200, usage) - - # Check migration allocations. - allocations = rp_obj.AllocationList.get_all_by_consumer_id( - self.ctx, migration_uuid) - self.assertEqual(1, len(allocations)) - usage = allocations[0].used - self.assertEqual(200, usage) - - # Clear the migration and confirm the target. 
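-        # NOTE: an allocation with used=0 acts as a delete: after
-        # replace_all() the migration consumer should have no allocations
-        # left, while the instance consumer keeps its allocation of 200.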
-        allocation1 = rp_obj.Allocation(resource_provider=target_rp,
-                                        consumer=inst_consumer,
-                                        resource_class=rp_class,
-                                        used=200)
-        allocation2 = rp_obj.Allocation(resource_provider=source_rp,
-                                        consumer=mig_consumer,
-                                        resource_class=rp_class,
-                                        used=0)
-        allocation_list = rp_obj.AllocationList(
-            self.ctx,
-            objects=[allocation1, allocation2],
-        )
-        allocation_list.replace_all()
-
-        allocations = rp_obj.AllocationList.get_all_by_consumer_id(
-            self.ctx, consumer_uuid)
-        self.assertEqual(1, len(allocations))
-        usage = allocations[0].used
-        self.assertEqual(200, usage)
-
-        allocations = rp_obj.AllocationList.get_all_by_consumer_id(
-            self.ctx, migration_uuid)
-        self.assertEqual(0, len(allocations))
-
-    def test_create_exceeding_capacity_allocation(self):
-        """Tests a list of allocations that contains an invalid allocation
-        exceeding a resource provider's capacity.
-
-        Expect InvalidAllocationCapacityExceeded to be raised, and none of
-        the allocations in the list to be applied.
-
-        """
-        empty_rp = self._create_provider('empty_rp')
-        full_rp = self._create_provider('full_rp')
-
-        for rp in (empty_rp, full_rp):
-            tb.add_inventory(rp, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(rp, fields.ResourceClass.MEMORY_MB, 1024,
-                             min_unit=64,
-                             max_unit=1024,
-                             step_size=64)
-
-        # Create a consumer representing the instance
-        inst_consumer = consumer_obj.Consumer(
-            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
-            project=self.project_obj)
-        inst_consumer.create()
-
-        # First create an allocation to consume full_rp's resources.
-        alloc_list = rp_obj.AllocationList(context=self.ctx,
-            objects=[
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=full_rp,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=12),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=full_rp,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=1024)
-            ])
-        alloc_list.replace_all()
-
-        # Create a consumer representing the second instance
-        inst2_consumer = consumer_obj.Consumer(
-            self.ctx, uuid=uuidsentinel.instance2, user=self.user_obj,
-            project=self.project_obj)
-        inst2_consumer.create()
-
-        # Create an allocation list consisting of valid requests and an
-        # invalid request exceeding the memory full_rp can provide.
-        alloc_list = rp_obj.AllocationList(context=self.ctx,
-            objects=[
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst2_consumer,
-                    resource_provider=empty_rp,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=12),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst2_consumer,
-                    resource_provider=empty_rp,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=512),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst2_consumer,
-                    resource_provider=full_rp,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=12),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst2_consumer,
-                    resource_provider=full_rp,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=512),
-            ])
-
-        self.assertRaises(exception.InvalidAllocationCapacityExceeded,
-                          alloc_list.replace_all)
-
-        # Make sure that allocations of both empty_rp and full_rp remain
-        # unchanged.
-        allocations = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, full_rp)
-        self.assertEqual(2, len(allocations))
-
-        allocations = rp_obj.AllocationList.get_all_by_resource_provider(
-            self.ctx, empty_rp)
-        self.assertEqual(0, len(allocations))
-
-    @mock.patch('nova.api.openstack.placement.objects.resource_provider.LOG')
-    def test_set_allocations_retry(self, mock_log):
-        """Test server side allocation write retry handling."""
-
-        # Create a single resource provider and give it some inventory.
-        rp1 = self._create_provider('rp1')
-        tb.add_inventory(rp1, fields.ResourceClass.VCPU, 24,
-                         allocation_ratio=16.0)
-        tb.add_inventory(rp1, fields.ResourceClass.MEMORY_MB, 1024,
-                         min_unit=64,
-                         max_unit=1024,
-                         step_size=64)
-        original_generation = rp1.generation
-        # Verify the generation is what we expect (we'll be checking again
-        # later).
-        self.assertEqual(2, original_generation)
-
-        # Create a consumer and have it make an allocation.
-        inst_consumer = consumer_obj.Consumer(
-            self.ctx, uuid=uuidsentinel.instance, user=self.user_obj,
-            project=self.project_obj)
-        inst_consumer.create()
-
-        alloc_list = rp_obj.AllocationList(context=self.ctx,
-            objects=[
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=rp1,
-                    resource_class=fields.ResourceClass.VCPU,
-                    used=12),
-                rp_obj.Allocation(
-                    context=self.ctx,
-                    consumer=inst_consumer,
-                    resource_provider=rp1,
-                    resource_class=fields.ResourceClass.MEMORY_MB,
-                    used=1024)
-            ])
-
-        # Make sure the right exception happens when the retry loop expires.
-        with mock.patch.object(rp_obj.AllocationList,
-                               'RP_CONFLICT_RETRY_COUNT', 0):
-            self.assertRaises(
-                exception.ResourceProviderConcurrentUpdateDetected,
-                alloc_list.replace_all)
-            mock_log.warning.assert_called_with(
-                'Exceeded retry limit of %d on allocations write', 0)
-
-        # Make sure the right thing happens after a small number of failures.
-        # There's a bit of mock magic going on here to ensure that we can
-        # both do some side effects on _set_allocations as well as have the
-        # real behavior. Two generation conflicts and then a success.
-        mock_log.reset_mock()
-        with mock.patch.object(rp_obj.AllocationList,
-                               'RP_CONFLICT_RETRY_COUNT', 3):
-            unmocked_set = functools.partial(
-                rp_obj.AllocationList._set_allocations, alloc_list)
-            with mock.patch(
-                    'nova.api.openstack.placement.objects.resource_provider.'
-                    'AllocationList._set_allocations') as mock_set:
-                exceptions = iter([
-                    exception.ResourceProviderConcurrentUpdateDetected(),
-                    exception.ResourceProviderConcurrentUpdateDetected(),
-                ])
-
-                def side_effect(*args, **kwargs):
-                    try:
-                        raise next(exceptions)
-                    except StopIteration:
-                        return unmocked_set(*args, **kwargs)
-
-                mock_set.side_effect = side_effect
-                alloc_list.replace_all()
-                self.assertEqual(2, mock_log.debug.call_count)
-                mock_log.debug.assert_called_with(
-                    'Retrying allocations write on resource provider '
-                    'generation conflict')
-                self.assertEqual(3, mock_set.call_count)
-
-        # Confirm we're using a different rp object after the change
-        # and that it has a higher generation.
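-        # NOTE: the successful retried write produced a fresh
-        # ResourceProvider object on the allocation, while the original
-        # rp1 object still carries the stale generation.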
-        new_rp = alloc_list[0].resource_provider
-        self.assertEqual(original_generation, rp1.generation)
-        self.assertEqual(original_generation + 1, new_rp.generation)
-
-
-class UsageListTestCase(tb.PlacementDbBaseTestCase):
-
-    def test_get_all_null(self):
-        for uuid in [uuidsentinel.rp_uuid_1, uuidsentinel.rp_uuid_2]:
-            self._create_provider(uuid, uuid=uuid)
-
-        usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid(
-            self.ctx, uuidsentinel.rp_uuid_1)
-        self.assertEqual(0, len(usage_list))
-
-    def test_get_all_one_allocation(self):
-        db_rp, _ = self._make_allocation(DISK_INVENTORY, DISK_ALLOCATION)
-        inv = rp_obj.Inventory(resource_provider=db_rp,
-                               resource_class=fields.ResourceClass.DISK_GB,
-                               total=1024)
-        inv.obj_set_defaults()
-        inv_list = rp_obj.InventoryList(objects=[inv])
-        db_rp.set_inventory(inv_list)
-
-        usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid(
-            self.ctx, db_rp.uuid)
-        self.assertEqual(1, len(usage_list))
-        self.assertEqual(2, usage_list[0].usage)
-        self.assertEqual(fields.ResourceClass.DISK_GB,
-                         usage_list[0].resource_class)
-
-    def test_get_inventory_no_allocation(self):
-        db_rp = self._create_provider('rp_no_inv')
-        tb.add_inventory(db_rp, fields.ResourceClass.DISK_GB, 1024)
-
-        usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid(
-            self.ctx, db_rp.uuid)
-        self.assertEqual(1, len(usage_list))
-        self.assertEqual(0, usage_list[0].usage)
-        self.assertEqual(fields.ResourceClass.DISK_GB,
-                         usage_list[0].resource_class)
-
-    def test_get_all_multiple_inv(self):
-        db_rp = self._create_provider('rp_no_inv')
-        tb.add_inventory(db_rp, fields.ResourceClass.DISK_GB, 1024)
-        tb.add_inventory(db_rp, fields.ResourceClass.VCPU, 24)
-
-        usage_list = rp_obj.UsageList.get_all_by_resource_provider_uuid(
-            self.ctx, db_rp.uuid)
-        self.assertEqual(2, len(usage_list))
-
-
-class ResourceClassListTestCase(tb.PlacementDbBaseTestCase):
-
-    def test_get_all_no_custom(self):
-        """Test that if we haven't yet added any custom resource classes, we
-        only get a list of ResourceClass objects representing the standard
-        classes.
-        """
-        rcs = rp_obj.ResourceClassList.get_all(self.ctx)
-        self.assertEqual(len(fields.ResourceClass.STANDARD), len(rcs))
-
-    def test_get_all_with_custom(self):
-        """Test that if we add some custom resource classes, we get a list
-        of ResourceClass objects representing the standard classes as well
-        as the custom classes.
- """ - customs = [ - ('CUSTOM_IRON_NFV', 10001), - ('CUSTOM_IRON_ENTERPRISE', 10002), - ] - with self.placement_db.get_engine().connect() as conn: - for custom in customs: - c_name, c_id = custom - ins = rp_obj._RC_TBL.insert().values(id=c_id, name=c_name) - conn.execute(ins) - - rcs = rp_obj.ResourceClassList.get_all(self.ctx) - expected_count = len(fields.ResourceClass.STANDARD) + len(customs) - self.assertEqual(expected_count, len(rcs)) - - -class ResourceClassTestCase(tb.PlacementDbBaseTestCase): - - def test_get_by_name(self): - rc = rp_obj.ResourceClass.get_by_name( - self.ctx, - fields.ResourceClass.VCPU - ) - vcpu_id = fields.ResourceClass.STANDARD.index( - fields.ResourceClass.VCPU - ) - self.assertEqual(vcpu_id, rc.id) - self.assertEqual(fields.ResourceClass.VCPU, rc.name) - - def test_get_by_name_not_found(self): - self.assertRaises(exception.ResourceClassNotFound, - rp_obj.ResourceClass.get_by_name, - self.ctx, - 'CUSTOM_NO_EXISTS') - - def test_get_by_name_custom(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc.create() - get_rc = rp_obj.ResourceClass.get_by_name( - self.ctx, - 'CUSTOM_IRON_NFV', - ) - self.assertEqual(rc.id, get_rc.id) - self.assertEqual(rc.name, get_rc.name) - - def test_create_fail_not_using_namespace(self): - rc = rp_obj.ResourceClass( - context=self.ctx, - name='IRON_NFV', - ) - exc = self.assertRaises(exception.ObjectActionError, rc.create) - self.assertIn('name must start with', str(exc)) - - def test_create_duplicate_standard(self): - rc = rp_obj.ResourceClass( - context=self.ctx, - name=fields.ResourceClass.VCPU, - ) - self.assertRaises(exception.ResourceClassExists, rc.create) - - def test_create(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc.create() - min_id = rp_obj.ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID - self.assertEqual(min_id, rc.id) - - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_ENTERPRISE', - ) - rc.create() - self.assertEqual(min_id + 1, rc.id) - - @mock.patch.object( - nova.api.openstack.placement.objects.resource_provider.ResourceClass, - "_get_next_id") - def test_create_duplicate_id_retry(self, mock_get): - # This order of ID generation will create rc1 with an ID of 42, try to - # create rc2 with the same ID, and then return 43 in the retry loop. - mock_get.side_effect = (42, 42, 43) - rc1 = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc1.create() - rc2 = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_TWO', - ) - rc2.create() - self.assertEqual(rc1.id, 42) - self.assertEqual(rc2.id, 43) - - @mock.patch.object( - nova.api.openstack.placement.objects.resource_provider.ResourceClass, - "_get_next_id") - def test_create_duplicate_id_retry_failing(self, mock_get): - """negative case for test_create_duplicate_id_retry""" - # This order of ID generation will create rc1 with an ID of 44, try to - # create rc2 with the same ID, and then return 45 in the retry loop. 
- mock_get.side_effect = (44, 44, 44, 44) - rc1 = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc1.create() - rc2 = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_TWO', - ) - rc2.RESOURCE_CREATE_RETRY_COUNT = 3 - self.assertRaises(exception.MaxDBRetriesExceeded, rc2.create) - - def test_create_duplicate_custom(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc.create() - self.assertEqual(rp_obj.ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID, - rc.id) - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - self.assertRaises(exception.ResourceClassExists, rc.create) - - def test_destroy_fail_no_id(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - self.assertRaises(exception.ObjectActionError, rc.destroy) - - def test_destroy_fail_standard(self): - rc = rp_obj.ResourceClass.get_by_name( - self.ctx, - 'VCPU', - ) - self.assertRaises(exception.ResourceClassCannotDeleteStandard, - rc.destroy) - - def test_destroy(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc.create() - rc_list = rp_obj.ResourceClassList.get_all(self.ctx) - rc_ids = (r.id for r in rc_list) - self.assertIn(rc.id, rc_ids) - - rc = rp_obj.ResourceClass.get_by_name( - self.ctx, - 'CUSTOM_IRON_NFV', - ) - - rc.destroy() - rc_list = rp_obj.ResourceClassList.get_all(self.ctx) - rc_ids = (r.id for r in rc_list) - self.assertNotIn(rc.id, rc_ids) - - # Verify rc cache was purged of the old entry - self.assertRaises(exception.ResourceClassNotFound, - rp_obj.ResourceClass.get_by_name, - self.ctx, - 'CUSTOM_IRON_NFV') - - def test_destroy_fail_with_inventory(self): - """Test that we raise an exception when attempting to delete a resource - class that is referenced in an inventory record. 
- """ - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc.create() - rp = rp_obj.ResourceProvider( - self.ctx, - name='my rp', - uuid=uuidsentinel.rp, - ) - rp.create() - inv = rp_obj.Inventory( - resource_provider=rp, - resource_class='CUSTOM_IRON_NFV', - total=1, - ) - inv.obj_set_defaults() - inv_list = rp_obj.InventoryList(objects=[inv]) - rp.set_inventory(inv_list) - - self.assertRaises(exception.ResourceClassInUse, - rc.destroy) - - rp.set_inventory(rp_obj.InventoryList(objects=[])) - rc.destroy() - rc_list = rp_obj.ResourceClassList.get_all(self.ctx) - rc_ids = (r.id for r in rc_list) - self.assertNotIn(rc.id, rc_ids) - - def test_save_fail_no_id(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - self.assertRaises(exception.ObjectActionError, rc.save) - - def test_save_fail_standard(self): - rc = rp_obj.ResourceClass.get_by_name( - self.ctx, - 'VCPU', - ) - self.assertRaises(exception.ResourceClassCannotUpdateStandard, - rc.save) - - def test_save(self): - rc = rp_obj.ResourceClass( - self.ctx, - name='CUSTOM_IRON_NFV', - ) - rc.create() - - rc = rp_obj.ResourceClass.get_by_name( - self.ctx, - 'CUSTOM_IRON_NFV', - ) - rc.name = 'CUSTOM_IRON_SILVER' - rc.save() - - # Verify rc cache was purged of the old entry - self.assertRaises(exception.NotFound, - rp_obj.ResourceClass.get_by_name, - self.ctx, - 'CUSTOM_IRON_NFV') - - -class ResourceProviderTraitTestCase(tb.PlacementDbBaseTestCase): - - def _assert_traits(self, expected_traits, traits_objs): - expected_traits.sort() - traits = [] - for obj in traits_objs: - traits.append(obj.name) - traits.sort() - self.assertEqual(expected_traits, traits) - - def _assert_traits_in(self, expected_traits, traits_objs): - traits = [trait.name for trait in traits_objs] - for expected in expected_traits: - self.assertIn(expected, traits) - - def test_trait_create(self): - t = rp_obj.Trait(self.ctx) - t.name = 'CUSTOM_TRAIT_A' - t.create() - self.assertIn('id', t) - self.assertEqual(t.name, 'CUSTOM_TRAIT_A') - - def test_trait_create_with_id_set(self): - t = rp_obj.Trait(self.ctx) - t.name = 'CUSTOM_TRAIT_A' - t.id = 1 - self.assertRaises(exception.ObjectActionError, t.create) - - def test_trait_create_without_name_set(self): - t = rp_obj.Trait(self.ctx) - self.assertRaises(exception.ObjectActionError, t.create) - - def test_trait_create_duplicated_trait(self): - trait = rp_obj.Trait(self.ctx) - trait.name = 'CUSTOM_TRAIT_A' - trait.create() - tmp_trait = rp_obj.Trait.get_by_name(self.ctx, 'CUSTOM_TRAIT_A') - self.assertEqual('CUSTOM_TRAIT_A', tmp_trait.name) - duplicated_trait = rp_obj.Trait(self.ctx) - duplicated_trait.name = 'CUSTOM_TRAIT_A' - self.assertRaises(exception.TraitExists, duplicated_trait.create) - - def test_trait_get(self): - t = rp_obj.Trait(self.ctx) - t.name = 'CUSTOM_TRAIT_A' - t.create() - t = rp_obj.Trait.get_by_name(self.ctx, 'CUSTOM_TRAIT_A') - self.assertEqual(t.name, 'CUSTOM_TRAIT_A') - - def test_trait_get_non_existed_trait(self): - self.assertRaises(exception.TraitNotFound, - rp_obj.Trait.get_by_name, self.ctx, 'CUSTOM_TRAIT_A') - - def test_bug_1760322(self): - # Under bug # #1760322, if the first hit to the traits table resulted - # in an exception, the sync transaction rolled back and the table - # stayed empty; but _TRAITS_SYNCED got set to True, so it didn't resync - # next time. 
-        # NOTE(cdent): With change Ic87518948ed5bf4ab79f9819cd94714e350ce265
-        # syncing is no longer done in the same way, so the bug fix that this
-        # test was testing is gone, but this test has been left in place to
-        # make sure we still get behavior we expect.
-        try:
-            rp_obj.Trait.get_by_name(self.ctx, 'CUSTOM_GOLD')
-        except exception.TraitNotFound:
-            pass
-        # Under bug #1760322, this raised TraitNotFound.
-        rp_obj.Trait.get_by_name(self.ctx, os_traits.HW_CPU_X86_AVX2)
-
-    def test_trait_destroy(self):
-        t = rp_obj.Trait(self.ctx)
-        t.name = 'CUSTOM_TRAIT_A'
-        t.create()
-        t = rp_obj.Trait.get_by_name(self.ctx, 'CUSTOM_TRAIT_A')
-        self.assertEqual(t.name, 'CUSTOM_TRAIT_A')
-        t.destroy()
-        self.assertRaises(exception.TraitNotFound, rp_obj.Trait.get_by_name,
-                          self.ctx, 'CUSTOM_TRAIT_A')
-
-    def test_trait_destroy_with_standard_trait(self):
-        t = rp_obj.Trait(self.ctx)
-        t.id = 1
-        t.name = 'HW_CPU_X86_AVX'
-        self.assertRaises(exception.TraitCannotDeleteStandard, t.destroy)
-
-    def test_traits_get_all(self):
-        trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C']
-        for name in trait_names:
-            t = rp_obj.Trait(self.ctx)
-            t.name = name
-            t.create()
-
-        self._assert_traits_in(trait_names,
-                               rp_obj.TraitList.get_all(self.ctx))
-
-    def test_traits_get_all_with_name_in_filter(self):
-        trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C']
-        for name in trait_names:
-            t = rp_obj.Trait(self.ctx)
-            t.name = name
-            t.create()
-
-        traits = rp_obj.TraitList.get_all(self.ctx,
-            filters={'name_in': ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B']})
-        self._assert_traits(['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B'], traits)
-
-    def test_traits_get_all_with_non_existed_name(self):
-        traits = rp_obj.TraitList.get_all(self.ctx,
-            filters={'name_in': ['CUSTOM_TRAIT_X', 'CUSTOM_TRAIT_Y']})
-        self.assertEqual(0, len(traits))
-
-    def test_traits_get_all_with_prefix_filter(self):
-        trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C']
-        for name in trait_names:
-            t = rp_obj.Trait(self.ctx)
-            t.name = name
-            t.create()
-
-        traits = rp_obj.TraitList.get_all(self.ctx,
-                                          filters={'prefix': 'CUSTOM'})
-        self._assert_traits(
-            ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C'],
-            traits)
-
-    def test_traits_get_all_with_non_existed_prefix(self):
-        traits = rp_obj.TraitList.get_all(self.ctx,
-            filters={"prefix": "NOT_EXISTED"})
-        self.assertEqual(0, len(traits))
-
-    def test_set_traits_for_resource_provider(self):
-        rp = self._create_provider('fake_resource_provider')
-        generation = rp.generation
-        self.assertIsInstance(rp.id, int)
-
-        trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C']
-        tb.set_traits(rp, *trait_names)
-
-        rp_traits = rp_obj.TraitList.get_all_by_resource_provider(self.ctx, rp)
-        self._assert_traits(trait_names, rp_traits)
-        self.assertEqual(rp.generation, generation + 1)
-        generation = rp.generation
-
-        trait_names.remove('CUSTOM_TRAIT_A')
-        updated_traits = rp_obj.TraitList.get_all(self.ctx,
-            filters={'name_in': trait_names})
-        self._assert_traits(trait_names, updated_traits)
-        tb.set_traits(rp, *trait_names)
-        rp_traits = rp_obj.TraitList.get_all_by_resource_provider(self.ctx, rp)
-        self._assert_traits(trait_names, rp_traits)
-        self.assertEqual(rp.generation, generation + 1)
-
-    def test_set_traits_for_correct_resource_provider(self):
-        """This test creates two ResourceProviders and attaches the same
-        trait to both of them. It then detaches the trait from one of them
-        and ensures the trait is still associated with the other one.
- """ - # Create two ResourceProviders - rp1 = self._create_provider('fake_resource_provider1') - rp2 = self._create_provider('fake_resource_provider2') - - tname = 'CUSTOM_TRAIT_A' - - # Associate the trait with two ResourceProviders - tb.set_traits(rp1, tname) - tb.set_traits(rp2, tname) - - # Ensure the association - rp1_traits = rp_obj.TraitList.get_all_by_resource_provider( - self.ctx, rp1) - rp2_traits = rp_obj.TraitList.get_all_by_resource_provider( - self.ctx, rp2) - self._assert_traits([tname], rp1_traits) - self._assert_traits([tname], rp2_traits) - - # Detach the trait from one of ResourceProvider, and ensure the - # trait association with another ResourceProvider still exists. - tb.set_traits(rp1) - rp1_traits = rp_obj.TraitList.get_all_by_resource_provider( - self.ctx, rp1) - rp2_traits = rp_obj.TraitList.get_all_by_resource_provider( - self.ctx, rp2) - self._assert_traits([], rp1_traits) - self._assert_traits([tname], rp2_traits) - - def test_trait_delete_in_use(self): - rp = self._create_provider('fake_resource_provider') - t, = tb.set_traits(rp, 'CUSTOM_TRAIT_A') - self.assertRaises(exception.TraitInUse, t.destroy) - - def test_traits_get_all_with_associated_true(self): - rp1 = self._create_provider('fake_resource_provider1') - rp2 = self._create_provider('fake_resource_provider2') - trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C'] - for name in trait_names: - t = rp_obj.Trait(self.ctx) - t.name = name - t.create() - - associated_traits = rp_obj.TraitList.get_all(self.ctx, - filters={'name_in': ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B']}) - rp1.set_traits(associated_traits) - rp2.set_traits(associated_traits) - self._assert_traits(['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B'], - rp_obj.TraitList.get_all(self.ctx, - filters={'associated': True})) - - def test_traits_get_all_with_associated_false(self): - rp1 = self._create_provider('fake_resource_provider1') - rp2 = self._create_provider('fake_resource_provider2') - trait_names = ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B', 'CUSTOM_TRAIT_C'] - for name in trait_names: - t = rp_obj.Trait(self.ctx) - t.name = name - t.create() - - associated_traits = rp_obj.TraitList.get_all(self.ctx, - filters={'name_in': ['CUSTOM_TRAIT_A', 'CUSTOM_TRAIT_B']}) - rp1.set_traits(associated_traits) - rp2.set_traits(associated_traits) - self._assert_traits_in(['CUSTOM_TRAIT_C'], - rp_obj.TraitList.get_all(self.ctx, - filters={'associated': False})) - - -class SharedProviderTestCase(tb.PlacementDbBaseTestCase): - """Tests that the queries used to determine placement in deployments with - shared resource providers such as a shared disk pool result in accurate - reporting of inventory and usage. - """ - - def _requested_resources(self): - STANDARDS = fields.ResourceClass.STANDARD - VCPU_ID = STANDARDS.index(fields.ResourceClass.VCPU) - MEMORY_MB_ID = STANDARDS.index(fields.ResourceClass.MEMORY_MB) - DISK_GB_ID = STANDARDS.index(fields.ResourceClass.DISK_GB) - # The resources we will request - resources = { - VCPU_ID: 1, - MEMORY_MB_ID: 64, - DISK_GB_ID: 100, - } - return resources - - def test_shared_provider_capacity(self): - """Sets up a resource provider that shares DISK_GB inventory via an - aggregate, a couple resource providers representing "local disk" - compute nodes and ensures the _get_providers_sharing_capacity() - function finds that provider and not providers of "local disk". 
- """ - # Create the two "local disk" compute node providers - cn1 = self._create_provider('cn1') - cn2 = self._create_provider('cn2') - - # Populate the two compute node providers with inventory. One has - # DISK_GB. Both should be excluded from the result (one doesn't have - # the requested resource; but neither is a sharing provider). - for cn in (cn1, cn2): - tb.add_inventory(cn, fields.ResourceClass.VCPU, 24, - allocation_ratio=16.0) - tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 32768, - min_unit=64, - max_unit=32768, - step_size=64, - allocation_ratio=1.5) - if cn is cn1: - tb.add_inventory(cn, fields.ResourceClass.DISK_GB, 2000, - min_unit=10, - max_unit=100, - step_size=10) - - # Create the shared storage pool - ss = self._create_provider('shared storage') - - # Give the shared storage pool some inventory of DISK_GB - tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000, - min_unit=10, - max_unit=100, - step_size=10) - - # Mark the shared storage pool as having inventory shared among any - # provider associated via aggregate - tb.set_traits(ss, "MISC_SHARES_VIA_AGGREGATE") - - # OK, now that has all been set up, let's verify that we get the ID of - # the shared storage pool when we ask for DISK_GB - got_ids = rp_obj._get_providers_with_shared_capacity( - self.ctx, - fields.ResourceClass.STANDARD.index(fields.ResourceClass.DISK_GB), - 100, - ) - self.assertEqual([ss.id], got_ids) diff --git a/nova/tests/functional/api/openstack/placement/db/test_user.py b/nova/tests/functional/api/openstack/placement/db/test_user.py deleted file mode 100644 index 145778c26df8..000000000000 --- a/nova/tests/functional/api/openstack/placement/db/test_user.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-from oslo_utils.fixture import uuidsentinel as uuids
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement.objects import user as user_obj
-from nova.tests.functional.api.openstack.placement.db import test_base as tb
-
-
-class UserTestCase(tb.PlacementDbBaseTestCase):
-    def test_non_existing_user(self):
-        self.assertRaises(
-            exception.UserNotFound, user_obj.User.get_by_external_id,
-            self.ctx, uuids.non_existing_user)
-
-    def test_create_and_get(self):
-        u = user_obj.User(self.ctx, external_id='another-user')
-        u.create()
-        u = user_obj.User.get_by_external_id(self.ctx, 'another-user')
-        # User ID == 1 is the fake-user created in setup
-        self.assertEqual(2, u.id)
-        self.assertRaises(exception.UserExists, u.create)
diff --git a/nova/tests/functional/api/openstack/placement/fixtures/__init__.py b/nova/tests/functional/api/openstack/placement/fixtures/__init__.py
deleted file mode 100644
index e69de29bb2d1..000000000000
diff --git a/nova/tests/functional/api/openstack/placement/fixtures/capture.py b/nova/tests/functional/api/openstack/placement/fixtures/capture.py
deleted file mode 100644
index 637c9621220b..000000000000
--- a/nova/tests/functional/api/openstack/placement/fixtures/capture.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-import warnings
-
-import fixtures
-from oslotest import log
-
-
-class NullHandler(logging.Handler):
-    """A custom NullHandler that attempts to format the record.
-
-    Used in conjunction with Logging below to detect formatting errors
-    in debug logs.
-    """
-    def handle(self, record):
-        self.format(record)
-
-    def emit(self, record):
-        pass
-
-    def createLock(self):
-        self.lock = None
-
-
-class Logging(log.ConfigureLogging):
-    """A logging fixture providing two important behaviors.
-
-    One is to capture logs for later inspection.
-
-    The other is to make sure that DEBUG logs, even if not captured,
-    are formatted.
-    """
-
-    def __init__(self):
-        super(Logging, self).__init__()
-        # If level was not otherwise set, default to INFO.
-        if self.level is None:
-            self.level = logging.INFO
-        # Always capture logs, unlike the parent.
-        self.capture_logs = True
-
-    def setUp(self):
-        super(Logging, self).setUp()
-        if self.level > logging.DEBUG:
-            handler = NullHandler()
-            self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
-            handler.setLevel(logging.DEBUG)
-
-
-class WarningsFixture(fixtures.Fixture):
-    """Filters or escalates certain warnings during test runs.
-
-    Add additional entries as required. Remove when obsolete.
-    """
-
-    def setUp(self):
-        super(WarningsFixture, self).setUp()
-
-        # Ignore policy scope warnings.
-        warnings.filterwarnings('ignore',
-                                message="Policy .* failed scope check",
-                                category=UserWarning)
-        # A UUIDField emits a warning if the value is not a valid UUID.
-        # Let's escalate that to an exception in the test to prevent adding
-        # violations.
-        warnings.filterwarnings('error', message=".*invalid UUID.*")
-
-        self.addCleanup(warnings.resetwarnings)
diff --git a/nova/tests/functional/api/openstack/placement/fixtures/gabbits.py b/nova/tests/functional/api/openstack/placement/fixtures/gabbits.py
deleted file mode 100644
index 4a4aedc55546..000000000000
--- a/nova/tests/functional/api/openstack/placement/fixtures/gabbits.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-from gabbi import fixture
-from oslo_config import cfg
-from oslo_config import fixture as config_fixture
-from oslo_middleware import cors
-from oslo_policy import opts as policy_opts
-from oslo_utils.fixture import uuidsentinel as uuids
-from oslo_utils import uuidutils
-from oslotest import output
-
-from nova.api.openstack.placement import context
-from nova.api.openstack.placement import deploy
-from nova.api.openstack.placement.objects import project as project_obj
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.objects import user as user_obj
-from nova.api.openstack.placement import policies
-from nova import rc_fields as fields
-from nova.tests import fixtures
-from nova.tests.functional.api.openstack.placement.db import test_base as tb
-from nova.tests.functional.api.openstack.placement.fixtures import capture
-from nova.tests.unit import policy_fixture
-
-
-CONF = cfg.CONF
-
-
-def setup_app():
-    return deploy.loadapp(CONF)
-
-
-class APIFixture(fixture.GabbiFixture):
-    """Set up the required backend fixtures for a basic placement service."""
-
-    def start_fixture(self):
-        # Set up stderr and stdout captures by directly driving the
-        # existing nova fixtures that do that. This captures the
-        # output that happens outside individual tests (for
-        # example database migrations).
-        self.standard_logging_fixture = capture.Logging()
-        self.standard_logging_fixture.setUp()
-        self.output_stream_fixture = output.CaptureOutput()
-        self.output_stream_fixture.setUp()
-        # Filter ignorable warnings during test runs.
-        self.warnings_fixture = capture.WarningsFixture()
-        self.warnings_fixture.setUp()
-
-        self.conf_fixture = config_fixture.Config(CONF)
-        self.conf_fixture.setUp()
-        # The Database fixture will get confused if only one of the databases
-        # is configured.
-        for group in ('placement_database', 'api_database', 'database'):
-            self.conf_fixture.config(
-                group=group,
-                connection='sqlite://',
-                sqlite_synchronous=False)
-        self.conf_fixture.config(
-            group='api', auth_strategy='noauth2')
-
-        self.context = context.RequestContext()
-
-        # Register CORS opts, but do not set config. This has the
-        # effect of exercising the "don't use cors" path in
-        # deploy.py. Without setting some config the group will not
-        # be present.
-        CONF.register_opts(cors.CORS_OPTS, 'cors')
-        # Set default policy opts, otherwise the deploy module can
-        # raise NoSuchOptError.
-        policy_opts.set_defaults(CONF)
-
-        # Make sure default_config_files is an empty list, not None.
-        # If it is None, /etc/nova/nova.conf is read and confuses results.
-        CONF([], default_config_files=[])
-
-        self._reset_db_flags()
-        self.placement_db_fixture = fixtures.Database('placement')
-        self.placement_db_fixture.setUp()
-        # Do this now instead of waiting for the WSGI app to start so that
-        # fixtures can have traits.
-        deploy.update_database()
-
-        os.environ['RP_UUID'] = uuidutils.generate_uuid()
-        os.environ['RP_NAME'] = uuidutils.generate_uuid()
-        os.environ['CUSTOM_RES_CLASS'] = 'CUSTOM_IRON_NFV'
-        os.environ['PROJECT_ID'] = uuidutils.generate_uuid()
-        os.environ['USER_ID'] = uuidutils.generate_uuid()
-        os.environ['PROJECT_ID_ALT'] = uuidutils.generate_uuid()
-        os.environ['USER_ID_ALT'] = uuidutils.generate_uuid()
-        os.environ['INSTANCE_UUID'] = uuidutils.generate_uuid()
-        os.environ['MIGRATION_UUID'] = uuidutils.generate_uuid()
-        os.environ['CONSUMER_UUID'] = uuidutils.generate_uuid()
-        os.environ['PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()
-        os.environ['ALT_PARENT_PROVIDER_UUID'] = uuidutils.generate_uuid()
-
-    def stop_fixture(self):
-        self.placement_db_fixture.cleanUp()
-
-        # Since we clean up the DB, we need to reset the traits sync
-        # flag to make sure the next run will recreate the traits and
-        # reset the _RC_CACHE so that any cached resource classes
-        # are flushed.
-        self._reset_db_flags()
-
-        self.warnings_fixture.cleanUp()
-        self.output_stream_fixture.cleanUp()
-        self.standard_logging_fixture.cleanUp()
-        self.conf_fixture.cleanUp()
-
-    @staticmethod
-    def _reset_db_flags():
-        rp_obj._TRAITS_SYNCED = False
-        rp_obj._RC_CACHE = None
-
-
-class AllocationFixture(APIFixture):
-    """An APIFixture that has some pre-made Allocations.
-
-              +----- same user----+         alt_user
-              |                   |            |
-    +----+----------+  +------+-----+  +-----+---------+
-    | consumer1     |  | consumer2  |  | alt_consumer  |
-    | DISK_GB:1000  |  | VCPU: 6    |  | VCPU: 1       |
-    |               |  |            |  | DISK_GB:20    |
-    +-------------+-+  +------+-----+  +-+-------------+
-                  |           |          |
-                  +-+---------+--------+-+
-                    | rp                |
-                    | VCPU: 10          |
-                    | DISK_GB:2048      |
-                    +-------------------+
-    """
-    def start_fixture(self):
-        super(AllocationFixture, self).start_fixture()
-
-        # For use creating and querying allocations/usages
-        os.environ['ALT_USER_ID'] = uuidutils.generate_uuid()
-        project_id = os.environ['PROJECT_ID']
-        user_id = os.environ['USER_ID']
-        alt_user_id = os.environ['ALT_USER_ID']
-
-        user = user_obj.User(self.context, external_id=user_id)
-        user.create()
-        alt_user = user_obj.User(self.context, external_id=alt_user_id)
-        alt_user.create()
-        project = project_obj.Project(self.context, external_id=project_id)
-        project.create()
-
-        # Stealing from the super
-        rp_name = os.environ['RP_NAME']
-        rp_uuid = os.environ['RP_UUID']
-        # Create the rp with VCPU and DISK_GB inventory
-        rp = tb.create_provider(self.context, rp_name, uuid=rp_uuid)
-        tb.add_inventory(rp, 'DISK_GB', 2048,
-                         step_size=10, min_unit=10, max_unit=1000)
-        tb.add_inventory(rp, 'VCPU', 10, max_unit=10)
-
-        # Create a first consumer for the DISK_GB allocations
-        consumer1 = tb.ensure_consumer(self.context, user, project)
-        tb.set_allocation(self.context, rp, consumer1, {'DISK_GB': 1000})
-        os.environ['CONSUMER_0'] = consumer1.uuid
-
-        # Create a second consumer for the VCPU allocations
-        consumer2 = tb.ensure_consumer(self.context, user, project)
-        tb.set_allocation(self.context, rp, consumer2, {'VCPU': 6})
-        os.environ['CONSUMER_ID'] = consumer2.uuid
-
-        # Create a consumer object for a different user
-        alt_consumer = tb.ensure_consumer(self.context, alt_user, project)
-        os.environ['ALT_CONSUMER_ID'] = alt_consumer.uuid
-
-        # Create a couple of allocations for a different user.
-        tb.set_allocation(self.context, rp, alt_consumer,
-                          {'DISK_GB': 20, 'VCPU': 1})
-
-        # The ALT_RP_XXX variables are for a resource provider that has
-        # not been created in the Allocation fixture
-        os.environ['ALT_RP_UUID'] = uuidutils.generate_uuid()
-        os.environ['ALT_RP_NAME'] = uuidutils.generate_uuid()
-
-
-class SharedStorageFixture(APIFixture):
-    """An APIFixture that has two compute nodes without local storage,
-    associated by aggregate with a provider of shared storage. Each
-    compute node has two NUMA node resource providers, each of which
-    has a PF resource provider.
-
-                     +-------------------------------------+
-                     |        sharing storage (ss)         |
-                     |             DISK_GB:2000            |
-                     |  traits: MISC_SHARES_VIA_AGGREGATE  |
-                     +-----------------+-------------------+
-                                       | aggregate
-        +--------------------------+   |   +------------------------+
-        | compute node (cn1)       |---+---| compute node (cn2)     |
-        | CPU: 24                  |       | CPU: 24                |
-        | MEMORY_MB: 128*1024      |       | MEMORY_MB: 128*1024    |
-        | traits: HW_CPU_X86_SSE,  |       |                        |
-        |         HW_CPU_X86_SSE2  |       |                        |
-        +--------------------------+       +------------------------+
-             |               |                 |               |
-        +---------+     +---------+       +---------+     +---------+
-        | numa1_1 |     | numa1_2 |       | numa2_1 |     | numa2_2 |
-        +---------+     +---------+       +---------+     +---------+
-             |               |                 |               |
-    +---------------++---------------++---------------++----------------+
-    | pf1_1         || pf1_2         || pf2_1         || pf2_2          |
-    | SRIOV_NET_VF:8|| SRIOV_NET_VF:8|| SRIOV_NET_VF:8|| SRIOV_NET_VF:8 |
-    +---------------++---------------++---------------++----------------+
-    """
-
-    def start_fixture(self):
-        super(SharedStorageFixture, self).start_fixture()
-
-        agg_uuid = uuidutils.generate_uuid()
-
-        cn1 = tb.create_provider(self.context, 'cn1', agg_uuid)
-        cn2 = tb.create_provider(self.context, 'cn2', agg_uuid)
-        ss = tb.create_provider(self.context, 'ss', agg_uuid)
-
-        numa1_1 = tb.create_provider(self.context, 'numa1_1', parent=cn1.uuid)
-        numa1_2 = tb.create_provider(self.context, 'numa1_2', parent=cn1.uuid)
-        numa2_1 = tb.create_provider(self.context, 'numa2_1', parent=cn2.uuid)
-        numa2_2 = tb.create_provider(self.context, 'numa2_2', parent=cn2.uuid)
-
-        pf1_1 = tb.create_provider(self.context, 'pf1_1', parent=numa1_1.uuid)
-        pf1_2 = tb.create_provider(self.context, 'pf1_2', parent=numa1_2.uuid)
-        pf2_1 = tb.create_provider(self.context, 'pf2_1', parent=numa2_1.uuid)
-        pf2_2 = tb.create_provider(self.context, 'pf2_2', parent=numa2_2.uuid)
-
-        os.environ['AGG_UUID'] = agg_uuid
-
-        os.environ['CN1_UUID'] = cn1.uuid
-        os.environ['CN2_UUID'] = cn2.uuid
-        os.environ['SS_UUID'] = ss.uuid
-
-        os.environ['NUMA1_1_UUID'] = numa1_1.uuid
-        os.environ['NUMA1_2_UUID'] = numa1_2.uuid
-        os.environ['NUMA2_1_UUID'] = numa2_1.uuid
-        os.environ['NUMA2_2_UUID'] = numa2_2.uuid
-
-        os.environ['PF1_1_UUID'] = pf1_1.uuid
-        os.environ['PF1_2_UUID'] = pf1_2.uuid
-        os.environ['PF2_1_UUID'] = pf2_1.uuid
-        os.environ['PF2_2_UUID'] = pf2_2.uuid
-
-        # Populate compute node inventory for VCPU and RAM
-        for cn in (cn1, cn2):
-            tb.add_inventory(cn, fields.ResourceClass.VCPU, 24,
-                             allocation_ratio=16.0)
-            tb.add_inventory(cn, fields.ResourceClass.MEMORY_MB, 128 * 1024,
-                             allocation_ratio=1.5)
-        tb.set_traits(cn1, 'HW_CPU_X86_SSE', 'HW_CPU_X86_SSE2')
-
-        # Populate shared storage provider with DISK_GB inventory and
-        # mark it shared among any provider associated via aggregate
-        tb.add_inventory(ss, fields.ResourceClass.DISK_GB, 2000,
-                         reserved=100, allocation_ratio=1.0)
-
tb.set_traits(ss, 'MISC_SHARES_VIA_AGGREGATE') - - # Populate PF inventory for VF - for pf in (pf1_1, pf1_2, pf2_1, pf2_2): - tb.add_inventory(pf, fields.ResourceClass.SRIOV_NET_VF, - 8, allocation_ratio=1.0) - - -class NonSharedStorageFixture(APIFixture): - """An APIFixture that has two compute nodes with local storage that do not - use shared storage. - """ - def start_fixture(self): - super(NonSharedStorageFixture, self).start_fixture() - - aggA_uuid = uuidutils.generate_uuid() - aggB_uuid = uuidutils.generate_uuid() - aggC_uuid = uuidutils.generate_uuid() - os.environ['AGGA_UUID'] = aggA_uuid - os.environ['AGGB_UUID'] = aggB_uuid - os.environ['AGGC_UUID'] = aggC_uuid - - cn1 = tb.create_provider(self.context, 'cn1') - cn2 = tb.create_provider(self.context, 'cn2') - - os.environ['CN1_UUID'] = cn1.uuid - os.environ['CN2_UUID'] = cn2.uuid - - # Populate compute node inventory for VCPU, RAM and DISK - for cn in (cn1, cn2): - tb.add_inventory(cn, 'VCPU', 24) - tb.add_inventory(cn, 'MEMORY_MB', 128 * 1024) - tb.add_inventory(cn, 'DISK_GB', 2000) - - -class CORSFixture(APIFixture): - """An APIFixture that turns on CORS.""" - - def start_fixture(self): - super(CORSFixture, self).start_fixture() - # NOTE(cdent): If we remove this override, then the cors - # group ends up not existing in the conf, so when deploy.py - # wants to load the CORS middleware, it will not. - self.conf_fixture.config( - group='cors', - allowed_origin='http://valid.example.com') - - -class GranularFixture(APIFixture): - """An APIFixture that sets up the following provider environment for - testing granular resource requests. - -+========================++========================++========================+ -|cn_left ||cn_middle ||cn_right | -|VCPU: 8 ||VCPU: 8 ||VCPU: 8 | -|MEMORY_MB: 4096 ||MEMORY_MB: 4096 ||MEMORY_MB: 4096 | -|DISK_GB: 500 ||SRIOV_NET_VF: 8 ||DISK_GB: 500 | -|VGPU: 8 ||CUSTOM_NET_MBPS: 4000 ||VGPU: 8 | -|SRIOV_NET_VF: 8 ||traits: HW_CPU_X86_AVX, || - max_unit: 2 | -|CUSTOM_NET_MBPS: 4000 || HW_CPU_X86_AVX2,||traits: HW_CPU_X86_MMX, | -|traits: HW_CPU_X86_AVX, || HW_CPU_X86_SSE, || HW_GPU_API_DXVA,| -| HW_CPU_X86_AVX2,|| HW_NIC_ACCEL_TLS|| CUSTOM_DISK_SSD,| -| HW_GPU_API_DXVA,|+=+=====+================++==+========+============+ -| HW_NIC_DCB_PFC, | : : : : a -| CUSTOM_FOO +..+ +--------------------+ : g -+========================+ : a : : g - : g : : C -+========================+ : g : +===============+======+ -|shr_disk_1 | : A : |shr_net | -|DISK_GB: 1000 +..+ : |SRIOV_NET_VF: 16 | -|traits: CUSTOM_DISK_SSD,| : : a |CUSTOM_NET_MBPS: 40000| -| MISC_SHARES_VIA_AGG...| : : g |traits: MISC_SHARES...| -+========================+ : : g +======================+ -+=======================+ : : B -|shr_disk_2 +...+ : -|DISK_GB: 1000 | : -|traits: MISC_SHARES... 
+.........+
-+=======================+
-    """
-    def start_fixture(self):
-        super(GranularFixture, self).start_fixture()
-
-        rp_obj.ResourceClass(
-            context=self.context, name='CUSTOM_NET_MBPS').create()
-
-        os.environ['AGGA'] = uuids.aggA
-        os.environ['AGGB'] = uuids.aggB
-        os.environ['AGGC'] = uuids.aggC
-
-        cn_left = tb.create_provider(self.context, 'cn_left', uuids.aggA)
-        os.environ['CN_LEFT'] = cn_left.uuid
-        tb.add_inventory(cn_left, 'VCPU', 8)
-        tb.add_inventory(cn_left, 'MEMORY_MB', 4096)
-        tb.add_inventory(cn_left, 'DISK_GB', 500)
-        tb.add_inventory(cn_left, 'VGPU', 8)
-        tb.add_inventory(cn_left, 'SRIOV_NET_VF', 8)
-        tb.add_inventory(cn_left, 'CUSTOM_NET_MBPS', 4000)
-        tb.set_traits(cn_left, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
-                      'HW_GPU_API_DXVA', 'HW_NIC_DCB_PFC', 'CUSTOM_FOO')
-
-        cn_middle = tb.create_provider(
-            self.context, 'cn_middle', uuids.aggA, uuids.aggB)
-        os.environ['CN_MIDDLE'] = cn_middle.uuid
-        tb.add_inventory(cn_middle, 'VCPU', 8)
-        tb.add_inventory(cn_middle, 'MEMORY_MB', 4096)
-        tb.add_inventory(cn_middle, 'SRIOV_NET_VF', 8)
-        tb.add_inventory(cn_middle, 'CUSTOM_NET_MBPS', 4000)
-        tb.set_traits(cn_middle, 'HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2',
-                      'HW_CPU_X86_SSE', 'HW_NIC_ACCEL_TLS')
-
-        cn_right = tb.create_provider(
-            self.context, 'cn_right', uuids.aggB, uuids.aggC)
-        os.environ['CN_RIGHT'] = cn_right.uuid
-        tb.add_inventory(cn_right, 'VCPU', 8)
-        tb.add_inventory(cn_right, 'MEMORY_MB', 4096)
-        tb.add_inventory(cn_right, 'DISK_GB', 500)
-        tb.add_inventory(cn_right, 'VGPU', 8, max_unit=2)
-        tb.set_traits(cn_right, 'HW_CPU_X86_MMX', 'HW_GPU_API_DXVA',
-                      'CUSTOM_DISK_SSD')
-
-        shr_disk_1 = tb.create_provider(self.context, 'shr_disk_1', uuids.aggA)
-        os.environ['SHR_DISK_1'] = shr_disk_1.uuid
-        tb.add_inventory(shr_disk_1, 'DISK_GB', 1000)
-        tb.set_traits(shr_disk_1, 'MISC_SHARES_VIA_AGGREGATE',
-                      'CUSTOM_DISK_SSD')
-
-        shr_disk_2 = tb.create_provider(
-            self.context, 'shr_disk_2', uuids.aggA, uuids.aggB)
-        os.environ['SHR_DISK_2'] = shr_disk_2.uuid
-        tb.add_inventory(shr_disk_2, 'DISK_GB', 1000)
-        tb.set_traits(shr_disk_2, 'MISC_SHARES_VIA_AGGREGATE')
-
-        shr_net = tb.create_provider(self.context, 'shr_net', uuids.aggC)
-        os.environ['SHR_NET'] = shr_net.uuid
-        tb.add_inventory(shr_net, 'SRIOV_NET_VF', 16)
-        tb.add_inventory(shr_net, 'CUSTOM_NET_MBPS', 40000)
-        tb.set_traits(shr_net, 'MISC_SHARES_VIA_AGGREGATE')
-
-
-class OpenPolicyFixture(APIFixture):
-    """An APIFixture that changes all policy rules to allow non-admins."""
-
-    def start_fixture(self):
-        super(OpenPolicyFixture, self).start_fixture()
-        self.placement_policy_fixture = policy_fixture.PlacementPolicyFixture()
-        self.placement_policy_fixture.setUp()
-        # Get all of the registered rules and set them to '@' to allow any
-        # user to have access. The nova policy "admin_or_owner" concept does
-        # not really apply to most placement resources since they do not
-        # have a user_id/project_id attribute.
-        rules = {}
-        for rule in policies.list_rules():
-            name = rule.name
-            # Ignore "base" rules for role:admin.
-            if name in ['placement', 'admin_api']:
-                continue
-            rules[name] = '@'
-        self.placement_policy_fixture.set_rules(rules)
-
-    def stop_fixture(self):
-        super(OpenPolicyFixture, self).stop_fixture()
-        self.placement_policy_fixture.cleanUp()
diff --git a/nova/tests/functional/api/openstack/placement/fixtures/placement.py b/nova/tests/functional/api/openstack/placement/fixtures/placement.py
deleted file mode 100644
index f8c3948ef971..000000000000
--- a/nova/tests/functional/api/openstack/placement/fixtures/placement.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-from oslo_config import cfg
-from oslo_config import fixture as config_fixture
-from oslo_utils import uuidutils
-from wsgi_intercept import interceptor
-
-from nova.api.openstack.placement import deploy
-
-
-CONF = cfg.CONF
-
-
-class PlacementFixture(fixtures.Fixture):
-    """A fixture for placement operations.
-
-    Runs the Placement application, with NoAuth middleware, behind a
-    wsgi-intercept endpoint.
-    This fixture also prevents calling the ServiceCatalog for getting the
-    endpoint.
-
-    It's possible to ask for a specific token when running the fixture so
-    that all calls pass this token.
-    """
-    def __init__(self, token='admin'):
-        self.token = token
-
-    def setUp(self):
-        super(PlacementFixture, self).setUp()
-
-        conf_fixture = config_fixture.Config(CONF)
-        conf_fixture.config(group='api', auth_strategy='noauth2')
-        loader = deploy.loadapp(CONF)
-        app = lambda: loader
-        self.endpoint = 'http://%s/placement' % uuidutils.generate_uuid()
-        intercept = interceptor.RequestsInterceptor(app, url=self.endpoint)
-        intercept.install_intercept()
-        self.addCleanup(intercept.uninstall_intercept)
diff --git a/nova/tests/functional/api/openstack/placement/gabbits/aggregate-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/aggregate-policy.yaml
deleted file mode 100644
index bfed5e6957ac..000000000000
--- a/nova/tests/functional/api/openstack/placement/gabbits/aggregate-policy.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-# This tests the individual CRUD operations on
-# /resource_providers/{uuid}/aggregates* using a non-admin user with an
-# open policy configuration. The response validation is intentionally minimal.
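The PlacementFixture deleted above is typically consumed from a fixtures-aware test case; a minimal usage sketch (the test class and assertion here are illustrative, not from the tree):

    import requests
    import testtools

    from nova.tests.functional.api.openstack.placement.fixtures import (
        placement as placement_fixtures)


    class PlacementSmokeTest(testtools.TestCase):
        def test_version_document(self):
            # The fixture installs a wsgi-intercept endpoint, so plain
            # requests calls against fixture.endpoint reach the app
            # in-process, with no real socket involved.
            fixture = self.useFixture(placement_fixtures.PlacementFixture())
            resp = requests.get(fixture.endpoint,
                                headers={'x-auth-token': fixture.token})
            self.assertEqual(200, resp.status_code)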
-fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -vars: - - &agg_1 f918801a-5e54-4bee-9095-09a9d0c786b8 - - &agg_2 a893eb5c-e2a0-4251-ab26-f71d3b0cfc0b - -tests: - -- name: post new resource provider - POST: /resource_providers - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - -- name: put some aggregates - PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates - data: - resource_provider_generation: 0 - aggregates: - - *agg_1 - - *agg_2 - status: 200 - -- name: get those aggregates - GET: $LAST_URL - response_json_paths: - $.aggregates.`len`: 2 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/aggregate.yaml b/nova/tests/functional/api/openstack/placement/gabbits/aggregate.yaml deleted file mode 100644 index fb75f9e7ae0e..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/aggregate.yaml +++ /dev/null @@ -1,204 +0,0 @@ - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -vars: - - &agg_1 f918801a-5e54-4bee-9095-09a9d0c786b8 - - &agg_2 a893eb5c-e2a0-4251-ab26-f71d3b0cfc0b - -tests: -- name: get aggregates for bad resource provider - GET: /resource_providers/6984bb2d-830d-4c8d-ac64-c5a8103664be/aggregates - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: put aggregates for bad resource provider - PUT: /resource_providers/6984bb2d-830d-4c8d-ac64-c5a8103664be/aggregates - data: [] - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: post new resource provider - POST: /resource_providers - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - -- name: get empty aggregates - GET: /resource_providers/$ENVIRON['RP_UUID']/aggregates - response_json_paths: - $.aggregates: [] - -- name: aggregates 404 for out of date microversion get - GET: /resource_providers/$ENVIRON['RP_UUID']/aggregates - request_headers: - openstack-api-version: placement 1.0 - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: aggregates 404 for out of date microversion put - PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates - request_headers: - openstack-api-version: placement 1.0 - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: put some aggregates - old payload and new microversion - PUT: $LAST_URL - data: - - *agg_1 - - *agg_2 - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: put some aggregates - new payload and old microversion - PUT: $LAST_URL - request_headers: - openstack-api-version: placement 1.18 - data: - resource_provider_generation: 0 - aggregates: - - *agg_1 - - *agg_2 - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: put some aggregates - new payload and new microversion - PUT: $LAST_URL - data: - resource_provider_generation: 0 - aggregates: - - *agg_1 - - *agg_2 - status: 200 - response_headers: - content-type: /application/json/ - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_json_paths: - $.aggregates[0]: *agg_1 - $.aggregates[1]: *agg_2 - $.resource_provider_generation: 1 - -- name: get those aggregates - GET: $LAST_URL - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_json_paths: - $.aggregates.`len`: 2 - -- name: clear those aggregates - generation conflict - PUT: $LAST_URL - data: - resource_provider_generation: 0 - aggregates: [] - status: 409 - response_json_paths: - $.errors[0].code: placement.concurrent_update - -- name: clear those aggregates - PUT: $LAST_URL - data: - resource_provider_generation: 1 - aggregates: [] - status: 200 - response_json_paths: - $.aggregates: [] - -- name: get empty aggregates again - GET: /resource_providers/$ENVIRON['RP_UUID']/aggregates - response_json_paths: - $.aggregates: [] - -- name: put non json - PUT: $LAST_URL - data: '{"bad", "not json"}' - status: 400 - response_strings: - - Malformed JSON - response_json_paths: - $.errors[0].title: Bad Request - -- name: put invalid json no generation - PUT: $LAST_URL - data: - aggregates: - - *agg_1 - - *agg_2 - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: put invalid json not uuids - PUT: $LAST_URL - data: - aggregates: - - harry - - sally - resource_provider_generation: 2 - status: 400 - response_strings: - - "is not a 'uuid'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: put same aggregates twice - PUT: $LAST_URL - data: - aggregates: - - *agg_1 - - *agg_1 - resource_provider_generation: 2 - status: 400 - response_strings: - - has non-unique elements - response_json_paths: - $.errors[0].title: Bad Request - -# The next two tests confirm that prior to version 1.15 we do -# not set the cache-control or last-modified headers on either -# PUT or GET. - -- name: put some aggregates v1.14 - PUT: $LAST_URL - request_headers: - openstack-api-version: placement 1.14 - data: - - *agg_1 - - *agg_2 - response_forbidden_headers: - - last-modified - - cache-control - -- name: get those aggregates v1.14 - GET: $LAST_URL - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - last-modified - - cache-control diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocation-bad-class.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocation-bad-class.yaml deleted file mode 100644 index ec08406419b8..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocation-bad-class.yaml +++ /dev/null @@ -1,77 +0,0 @@ - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - # Using <= 1.11 allows the PUT /allocations/{uuid} below - # to work with the older request form. 
- openstack-api-version: placement 1.11 - -tests: - -- name: create a resource provider - POST: /resource_providers - data: - name: an rp - status: 201 - -- name: get resource provider - GET: $LOCATION - status: 200 - -- name: create a resource class - PUT: /resource_classes/CUSTOM_GOLD - status: 201 - -- name: add inventory to an rp - PUT: /resource_providers/$HISTORY['get resource provider'].$RESPONSE['$.uuid']/inventories - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 24 - CUSTOM_GOLD: - total: 5 - status: 200 - -- name: allocate some of it two - desc: this is the one that used to raise a 500 - PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8 - data: - allocations: - - resource_provider: - uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid'] - resources: - DISK_GB: 5 - CUSTOM_GOLD: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 409 - -- name: allocate some of it custom - PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8 - data: - allocations: - - resource_provider: - uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid'] - resources: - CUSTOM_GOLD: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 - -- name: allocate some of it standard - PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8 - data: - allocations: - - resource_provider: - uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid'] - resources: - DISK_GB: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 409 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml deleted file mode 100644 index 22b6ce6f8e62..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-member-of.yaml +++ /dev/null @@ -1,141 +0,0 @@ -# Tests of allocation candidates API - -fixtures: - - NonSharedStorageFixture - -defaults: - request_headers: - x-auth-token: admin - content-type: application/json - accept: application/json - openstack-api-version: placement 1.24 - -tests: - -- name: get bad member_of microversion - GET: /allocation_candidates?resources=VCPU:1&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] - request_headers: - openstack-api-version: placement 1.18 - status: 400 - response_strings: - - Invalid query string parameters - - "'member_of' was unexpected" - -- name: get allocation candidates invalid member_of value - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=INVALID_UUID - status: 400 - response_strings: - - Expected 'member_of' parameter to contain valid UUID(s). - -- name: get allocation candidates no 'in:' for multiple member_of - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] - status: 400 - response_strings: - - Multiple values for 'member_of' must be prefixed with the 'in:' keyword - -- name: get allocation candidates multiple member_of with 'in:' but invalid values - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],INVALID_UUID - status: 400 - response_strings: - - Expected 'member_of' parameter to contain valid UUID(s). 
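As a sketch of the query syntax these tests are validating (endpoint and UUIDs here are illustrative): a single aggregate may be passed bare, while multiple aggregates need the `in:` prefix:

    import requests

    PLACEMENT = 'http://placement.example.com'  # hypothetical endpoint
    agg_a = 'f918801a-5e54-4bee-9095-09a9d0c786b8'
    agg_b = 'a893eb5c-e2a0-4251-ab26-f71d3b0cfc0b'

    resp = requests.get(
        '%s/allocation_candidates' % PLACEMENT,
        params={'resources': 'VCPU:1,MEMORY_MB:1024,DISK_GB:100',
                # 'in:' means "member of any of these aggregates".
                'member_of': 'in:%s,%s' % (agg_a, agg_b)},
        headers={'x-auth-token': 'admin',
                 'openstack-api-version': 'placement 1.24'})
    assert resp.status_code == 200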
- -- name: get allocation candidates multiple member_of with 'in:' but no aggregates - GET: /allocation_candidates?&member_of=in:&resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100 - status: 400 - response_strings: - - Expected 'member_of' parameter to contain valid UUID(s). - -- name: get allocation candidates with no match for member_of - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 0 - -- name: get compute node 1 state - GET: /resource_providers/$ENVIRON['CN1_UUID'] - -- name: associate the first compute node with aggA - PUT: /resource_providers/$ENVIRON['CN1_UUID']/aggregates - data: - aggregates: - - $ENVIRON['AGGA_UUID'] - resource_provider_generation: $HISTORY['get compute node 1 state'].$RESPONSE['$.generation'] - status: 200 - -- name: verify that the member_of call now returns 1 allocation_candidate - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 1 - -- name: get compute node 2 state - GET: /resource_providers/$ENVIRON['CN2_UUID'] - -- name: associate the second compute node with aggB - PUT: /resource_providers/$ENVIRON['CN2_UUID']/aggregates - data: - aggregates: - - $ENVIRON['AGGB_UUID'] - resource_provider_generation: $HISTORY['get compute node 2 state'].$RESPONSE['$.generation'] - status: 200 - -- name: verify that the member_of call now returns both RPs - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 2 - -- name: verify that aggC still returns no RPs - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGC_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 0 - -- name: get current compute node 1 state - GET: /resource_providers/$ENVIRON['CN1_UUID'] - -- name: now associate the first compute node with both aggA and aggC - PUT: /resource_providers/$ENVIRON['CN1_UUID']/aggregates - data: - aggregates: - - $ENVIRON['AGGA_UUID'] - - $ENVIRON['AGGC_UUID'] - resource_provider_generation: $HISTORY['get current compute node 1 state'].$RESPONSE['$.generation'] - -- name: verify that the member_of call for aggs A and B still returns 2 allocation_candidates - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=in:$ENVIRON['AGGA_UUID'],$ENVIRON['AGGB_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 2 - status: 200 - -- name: verify microversion fail for multiple member_of params - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']&member_of=$ENVIRON['AGGB_UUID'] - request_headers: - openstack-api-version: placement 1.23 - status: 400 - response_strings: - - 'Multiple member_of parameters are not supported' - response_json_paths: - $.errors[0].title: Bad Request - -- name: verify that no RP is associated with BOTH aggA and aggB - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']&member_of=$ENVIRON['AGGB_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 0 - -- name: associate the second compute node with aggA and aggB - PUT: /resource_providers/$ENVIRON['CN2_UUID']/aggregates - data: - aggregates: - - $ENVIRON['AGGA_UUID'] - - 
$ENVIRON['AGGB_UUID'] - resource_provider_generation: $HISTORY['associate the second compute node with aggB'].$RESPONSE['$.resource_provider_generation'] - status: 200 - -- name: verify that second RP is associated with BOTH aggA and aggB - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&member_of=$ENVIRON['AGGA_UUID']&member_of=$ENVIRON['AGGB_UUID'] - status: 200 - response_json_paths: - $.allocation_requests.`len`: 1 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-policy.yaml deleted file mode 100644 index 6fbe75dd1bea..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates-policy.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# This tests GET /allocation_candidates using a non-admin -# user with an open policy configuration. The response validation is -# intentionally minimal. -fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -tests: - -- name: get allocation candidates - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100 - status: 200 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates.yaml deleted file mode 100644 index 7383ed8a5c2c..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocation-candidates.yaml +++ /dev/null @@ -1,416 +0,0 @@ -# Tests of allocation candidates API - -fixtures: - - SharedStorageFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - openstack-api-version: placement 1.10 - -tests: - -- name: list traits - GET: /traits - status: 200 - response_strings: - # We at least want to make sure that this trait is supported. - - MISC_SHARES_VIA_AGGREGATE - -- name: get allocation candidates before microversion - GET: /allocation_candidates?resources=VCPU:1 - request_headers: - openstack-api-version: placement 1.8 - status: 404 - -- name: get allocation candidates empty resources - GET: /allocation_candidates?resources= - status: 400 - response_strings: - - Badly formed resources parameter. Expected resources query string parameter in form - - 'Got: empty string.' 
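The expected shape of the `resources` parameter is `RESOURCE_CLASS:amount` pairs joined by commas; an illustrative parser (not the service's actual validation code) that mirrors the 400 responses asserted in these tests:

    def parse_resources(value):
        """Parse 'VCPU:1,MEMORY_MB:1024' into {'VCPU': 1, 'MEMORY_MB': 1024}."""
        if not value:
            raise ValueError('empty string')
        resources = {}
        for chunk in value.split(','):
            rc, sep, amount = chunk.partition(':')
            if not sep or not amount.isdigit():
                raise ValueError('expected RESOURCE_CLASS:N, got %r' % chunk)
            resources[rc] = int(amount)
        return resources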
- -- name: get allocation candidates no resources - GET: /allocation_candidates - status: 400 - response_strings: - - "'resources' is a required property" - -- name: get bad resource class - GET: /allocation_candidates?resources=MCPU:99 - status: 400 - response_strings: - - Invalid resource class in resources parameter - -- name: get bad limit microversion - GET: /allocation_candidates?resources=VCPU:1&limit=5 - request_headers: - openstack-api-version: placement 1.15 - status: 400 - response_strings: - - Invalid query string parameters - - "'limit' was unexpected" - -- name: get bad limit type - GET: /allocation_candidates?resources=VCPU:1&limit=cow - request_headers: - openstack-api-version: placement 1.16 - status: 400 - response_strings: - - Invalid query string parameters - - "Failed validating 'pattern'" - -- name: get bad limit value negative - GET: /allocation_candidates?resources=VCPU:1&limit=-99 - request_headers: - openstack-api-version: placement 1.16 - status: 400 - response_strings: - - Invalid query string parameters - - "Failed validating 'pattern'" - -- name: get bad limit value zero - GET: /allocation_candidates?resources=VCPU:1&limit=0 - request_headers: - openstack-api-version: placement 1.16 - status: 400 - response_strings: - - Invalid query string parameters - - "Failed validating 'pattern'" - -- name: get allocation candidates no allocations yet - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100 - status: 200 - response_json_paths: - # There are 3 providers involved. 2 compute nodes, 1 shared storage - # provider - $.provider_summaries.`len`: 3 - # However, there are only 2 allocation requests, one for each compute - # node that provides the VCPU/MEMORY_MB and DISK_GB provided by the - # shared storage provider - $.allocation_requests.`len`: 2 - # Verify that compute node #1 only has VCPU and MEMORY_MB listed in the - # resource requests. This validates the entire resources key. 
-      $.allocation_requests..allocations[?resource_provider.uuid="$ENVIRON['CN1_UUID']"].resources:
-          VCPU: 1
-          MEMORY_MB: 1024
-      # Verify that compute node #2 only has VCPU and MEMORY_MB listed in the
-      # resource requests
-      $.allocation_requests..allocations[?resource_provider.uuid="$ENVIRON['CN2_UUID']"].resources:
-          VCPU: 1
-          MEMORY_MB: 1024
-      # Verify that shared storage provider only has DISK_GB listed in the
-      # resource requests, but is listed twice
-      $.allocation_requests..allocations[?resource_provider.uuid="$ENVIRON['SS_UUID']"].resources[DISK_GB]: [100, 100]
-      # Verify that the resources listed in the provider summary for compute
-      # node #1 show correct capacity and usage
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].resources[VCPU].capacity: 384 # 16.0 * 24
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].resources[VCPU].used: 0
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].resources[MEMORY_MB].capacity: 196608 # 1.5 * 128G
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].resources[MEMORY_MB].used: 0
-      # Verify that the resources listed in the provider summary for compute
-      # node #2 show correct capacity and usage
-      $.provider_summaries["$ENVIRON['CN2_UUID']"].resources[VCPU].capacity: 384 # 16.0 * 24
-      $.provider_summaries["$ENVIRON['CN2_UUID']"].resources[VCPU].used: 0
-      $.provider_summaries["$ENVIRON['CN2_UUID']"].resources[MEMORY_MB].capacity: 196608 # 1.5 * 128G
-      $.provider_summaries["$ENVIRON['CN2_UUID']"].resources[MEMORY_MB].used: 0
-      # Verify that the resources listed in the provider summary for shared
-      # storage show correct capacity and usage
-      $.provider_summaries["$ENVIRON['SS_UUID']"].resources[DISK_GB].capacity: 1900 # 1.0 * 2000 - 100G
-      $.provider_summaries["$ENVIRON['SS_UUID']"].resources[DISK_GB].used: 0
-  response_forbidden_headers:
-      # In the default microversion in this file (1.10) the cache headers
-      # are not present.
-      - cache-control
-      - last-modified
-
-# Verify the 1.12 format of the allocation_requests sub object which
-# changes from a list-list to dict-ish format.
-- name: get allocation candidates 1.12 dictish
-  GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
-  request_headers:
-      openstack-api-version: placement 1.12
-  response_json_paths:
-      # There are 3 providers involved. 2 compute nodes, 1 shared storage
-      # provider
-      $.provider_summaries.`len`: 3
-      # However, there are only 2 allocation requests, one for each compute
-      # node that provides the VCPU/MEMORY_MB and DISK_GB provided by the
-      # shared storage provider
-      $.allocation_requests.`len`: 2
-      # Verify that compute node #1 only has VCPU and MEMORY_MB listed in the
-      # resource requests. This validates the entire resources key.
-      $.allocation_requests..allocations["$ENVIRON['CN1_UUID']"].resources:
-          VCPU: 1
-          MEMORY_MB: 1024
-      # Verify that compute node #2 only has VCPU and MEMORY_MB listed in the
-      # resource requests
-      $.allocation_requests..allocations["$ENVIRON['CN2_UUID']"].resources:
-          VCPU: 1
-          MEMORY_MB: 1024
-      # Verify that shared storage provider only has DISK_GB listed in the
-      # resource requests, but is listed twice
-      $.allocation_requests..allocations["$ENVIRON['SS_UUID']"].resources[DISK_GB]: [100, 100]
-
-- name: get allocation candidates cache headers
-  GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100
-  request_headers:
-      # microversion 1.15 to cause cache headers
-      openstack-api-version: placement 1.15
-  response_headers:
-      cache-control: no-cache
-      # Does last-modified look like a legit timestamp?
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: get allocation candidates with limit - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&limit=1 - status: 200 - request_headers: - openstack-api-version: placement 1.16 - response_json_paths: - $.allocation_requests.`len`: 1 - -- name: get allocation candidates with required traits in old version - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE - status: 400 - request_headers: - openstack-api-version: placement 1.16 - response_strings: - - Invalid query string parameters - - "'required' was unexpected" - -- name: get allocation candidates without traits summary in old version - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100 - status: 200 - request_headers: - openstack-api-version: placement 1.16 - response_json_paths: - $.provider_summaries["$ENVIRON['CN1_UUID']"].`len`: 1 - $.provider_summaries["$ENVIRON['CN2_UUID']"].`len`: 1 - -- name: get allocation candidates with invalid trait - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=INVALID_TRAIT - status: 400 - request_headers: - openstack-api-version: placement 1.17 - response_strings: - - No such trait(s) - -- name: get allocation candidates with empty required value - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required= - status: 400 - request_headers: - openstack-api-version: placement 1.17 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC." - -- name: get allocation candidates with empty required value 1.22 - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required= - status: 400 - request_headers: - openstack-api-version: placement 1.22 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,!CUSTOM_MAGIC." - -- name: get allocation candidates with invalid required value - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=,, - status: 400 - request_headers: - openstack-api-version: placement 1.17 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC." - -- name: get allocation candidates with forbidden trait pre-forbidden - GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=!CUSTOM_MAGIC - status: 400 - request_headers: - openstack-api-version: placement 1.17 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC." 
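A sketch of how the `required` parameter decomposes (illustrative code, not the service's implementation): plain names are required traits, and from microversion 1.22 a `!` prefix marks a trait forbidden, which is why the same value draws a 400 at 1.17 in the test above:

    def parse_required(value):
        """Split 'HW_CPU_X86_VMX,!CUSTOM_MAGIC' into (required, forbidden)."""
        names = value.split(',')
        if not value or not all(names):
            raise ValueError(
                "Expected 'required' parameter value of the form: "
                "HW_CPU_X86_VMX,!CUSTOM_MAGIC.")
        required = set(n for n in names if not n.startswith('!'))
        forbidden = set(n[1:] for n in names if n.startswith('!'))
        return required, forbidden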
-
-- name: get allocation candidates with required trait
-  GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE
-  status: 200
-  request_headers:
-      openstack-api-version: placement 1.17
-  response_json_paths:
-      $.allocation_requests.`len`: 1
-      $.provider_summaries.`len`: 2
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].`len`: 2
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].traits.`sorted`:
-          - HW_CPU_X86_SSE
-          - HW_CPU_X86_SSE2
-
-- name: get allocation candidates with forbidden trait
-  GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=!HW_CPU_X86_SSE
-  status: 200
-  request_headers:
-      openstack-api-version: placement 1.22
-  response_json_paths:
-      # There are no allocations for CN1
-      $.allocation_requests.`len`: 1
-      $.allocation_requests[0].allocations.`len`: 2
-      $.allocation_requests[0].allocations["$ENVIRON['CN2_UUID']"].resources.VCPU: 1
-      $.allocation_requests[0].allocations["$ENVIRON['CN2_UUID']"].resources.MEMORY_MB: 1024
-      $.allocation_requests[0].allocations["$ENVIRON['SS_UUID']"].resources.DISK_GB: 100
-
-- name: get allocation candidates with multiple required traits
-  GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE,HW_CPU_X86_SSE2
-  status: 200
-  request_headers:
-      openstack-api-version: placement 1.17
-  response_json_paths:
-      $.allocation_requests.`len`: 1
-      $.provider_summaries.`len`: 2
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].`len`: 2
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].traits.`sorted`:
-          - HW_CPU_X86_SSE
-          - HW_CPU_X86_SSE2
-
-- name: get allocation candidates with required trait and no matching
-  GET: /allocation_candidates?resources=VCPU:1,MEMORY_MB:1024,DISK_GB:100&required=HW_CPU_X86_SSE3
-  status: 200
-  request_headers:
-      openstack-api-version: placement 1.17
-  response_json_paths:
-      $.allocation_requests.`len`: 0
-      $.provider_summaries.`len`: 0
-
-# Before microversion 1.27, the ``provider_summaries`` field in the response
-# of the ``GET /allocation_candidates`` API included only the inventories of
-# resource classes that are requested.
-- name: get allocation candidates provider summaries with requested resource
-  GET: /allocation_candidates?resources=VCPU:1
-  status: 200
-  request_headers:
-      openstack-api-version: placement 1.26
-  response_json_paths:
-      $.allocation_requests.`len`: 2
-      $.provider_summaries.`len`: 2
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].resources.`len`: 1
-      $.provider_summaries["$ENVIRON['CN1_UUID']"].resources:
-          VCPU:
-              capacity: 384 # 16.0 * 24
-              used: 0
-      $.provider_summaries["$ENVIRON['CN2_UUID']"].resources.`len`: 1
-      $.provider_summaries["$ENVIRON['CN2_UUID']"].resources:
-          VCPU:
-              capacity: 384 # 16.0 * 24
-              used: 0
-
-# From microversion 1.27, the ``provider_summaries`` field includes
-# all the resource class inventories regardless of whether they are requested.
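A sketch of reading capacity headroom out of `provider_summaries`, assuming `body` is the parsed JSON of a microversion 1.27+ response like the one asserted in the next test:

    def free_capacity(body):
        # Map each provider to its per-resource-class free capacity.
        free = {}
        for rp_uuid, summary in body['provider_summaries'].items():
            free[rp_uuid] = {
                rc: inv['capacity'] - inv['used']
                for rc, inv in summary['resources'].items()}
        return free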
-- name: get allocation candidates provider summaries with all resources - GET: /allocation_candidates?resources=VCPU:1 - status: 200 - request_headers: - openstack-api-version: placement 1.27 - response_json_paths: - $.allocation_requests.`len`: 2 - $.provider_summaries.`len`: 2 - $.provider_summaries["$ENVIRON['CN1_UUID']"].resources.`len`: 2 - $.provider_summaries["$ENVIRON['CN1_UUID']"].resources: - VCPU: - capacity: 384 # 16.0 * 24 - used: 0 - MEMORY_MB: - capacity: 196608 # 1.5 * 128G - used: 0 - $.provider_summaries["$ENVIRON['CN2_UUID']"].resources.`len`: 2 - $.provider_summaries["$ENVIRON['CN2_UUID']"].resources: - VCPU: - capacity: 384 # 16.0 * 24 - used: 0 - MEMORY_MB: - capacity: 196608 # 1.5 * 128G - used: 0 - -# Before microversion 1.29, no root/parent uuid is included -- name: get allocation candidates no root or parent uuid - GET: /allocation_candidates?resources=VCPU:1 - status: 200 - request_headers: - openstack-api-version: placement 1.28 - response_json_paths: - $.allocation_requests.`len`: 2 - $.provider_summaries.`len`: 2 - $.provider_summaries.["$ENVIRON['CN1_UUID']"].`len`: 2 - $.provider_summaries.["$ENVIRON['CN2_UUID']"].`len`: 2 - -- name: get allocation candidates with root and parent uuid - GET: /allocation_candidates?resources=VCPU:1 - status: 200 - request_headers: - openstack-api-version: placement 1.29 - response_json_paths: - $.allocation_requests.`len`: 2 - $.provider_summaries.`len`: 10 - $.provider_summaries.["$ENVIRON['CN1_UUID']"].`len`: 4 - $.provider_summaries.["$ENVIRON['CN2_UUID']"].`len`: 4 - $.provider_summaries.["$ENVIRON['CN1_UUID']"].parent_provider_uuid: null - $.provider_summaries.["$ENVIRON['CN1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']" - $.provider_summaries.["$ENVIRON['NUMA1_1_UUID']"].parent_provider_uuid: "$ENVIRON['CN1_UUID']" - $.provider_summaries.["$ENVIRON['NUMA1_1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']" - $.provider_summaries.["$ENVIRON['NUMA1_2_UUID']"].parent_provider_uuid: "$ENVIRON['CN1_UUID']" - $.provider_summaries.["$ENVIRON['NUMA1_2_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']" - $.provider_summaries.["$ENVIRON['PF1_1_UUID']"].parent_provider_uuid: "$ENVIRON['NUMA1_1_UUID']" - $.provider_summaries.["$ENVIRON['PF1_1_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']" - $.provider_summaries.["$ENVIRON['PF1_2_UUID']"].parent_provider_uuid: "$ENVIRON['NUMA1_2_UUID']" - $.provider_summaries.["$ENVIRON['PF1_2_UUID']"].root_provider_uuid: "$ENVIRON['CN1_UUID']" - -# Before microversion 1.29, it isn't aware of nested providers. -# Namely, it can return non-root providers for allocation candidates, -- name: get allocation candidates only nested provider old microversion - GET: /allocation_candidates?resources=SRIOV_NET_VF:4 - status: 200 - request_headers: - openstack-api-version: placement 1.28 - response_json_paths: - $.allocation_requests.`len`: 4 - $.provider_summaries.`len`: 4 - -- name: get allocation candidates only nested provider new microversion - GET: /allocation_candidates?resources=SRIOV_NET_VF:4 - status: 200 - request_headers: - openstack-api-version: placement 1.29 - response_json_paths: - $.allocation_requests.`len`: 4 - $.provider_summaries.`len`: 10 - -# ...but it can't return combinations of providers in a tree. 
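For the 1.29 behavior exercised below, each allocation request can span a provider tree; its `allocations` key maps provider UUID to resources, so a candidate pairing a compute node with one of its PFs looks roughly like the shape in the comment (UUIDs abbreviated; `body` is the parsed response JSON):

    # {'allocations': {CN1: {'resources': {'VCPU': 1}},
    #                  PF1_1: {'resources': {'SRIOV_NET_VF': 4}}}}
    for request in body['allocation_requests']:
        for rp_uuid, alloc in request['allocations'].items():
            for rc, amount in alloc['resources'].items():
                print(rp_uuid, rc, amount)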
-- name: get allocation candidates root and nested old microversion - GET: /allocation_candidates?resources=VCPU:1,SRIOV_NET_VF:4 - status: 200 - request_headers: - openstack-api-version: placement 1.28 - response_json_paths: - $.allocation_requests.`len`: 0 - $.provider_summaries.`len`: 0 - -- name: get allocation candidates root and nested new microversion - GET: /allocation_candidates?resources=VCPU:1,SRIOV_NET_VF:4 - status: 200 - request_headers: - openstack-api-version: placement 1.29 - response_json_paths: - $.allocation_requests.`len`: 4 - $.provider_summaries.`len`: 10 - $.allocation_requests..allocations["$ENVIRON['CN1_UUID']"].resources.VCPU: [1, 1] - $.allocation_requests..allocations["$ENVIRON['PF1_1_UUID']"].resources.SRIOV_NET_VF: 4 - $.allocation_requests..allocations["$ENVIRON['PF1_2_UUID']"].resources.SRIOV_NET_VF: 4 - $.allocation_requests..allocations["$ENVIRON['CN2_UUID']"].resources.VCPU: [1, 1] - $.allocation_requests..allocations["$ENVIRON['PF2_1_UUID']"].resources.SRIOV_NET_VF: 4 - $.allocation_requests..allocations["$ENVIRON['PF2_2_UUID']"].resources.SRIOV_NET_VF: 4 - -# Make sure that old microversions can return combinations where -# sharing providers are involved -- name: get allocation candidates shared and nested old microversion - GET: /allocation_candidates?resources=DISK_GB:10,SRIOV_NET_VF:4 - status: 200 - request_headers: - openstack-api-version: placement 1.28 - response_json_paths: - $.allocation_requests.`len`: 4 - $.provider_summaries.`len`: 5 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-12.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-12.yaml deleted file mode 100644 index 0b951347ad99..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-12.yaml +++ /dev/null @@ -1,130 +0,0 @@ -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement 1.12 - -tests: - -- name: put an allocation listish - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - JSON does not validate - -- name: put resource provider not uuid - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: - nice_house_friend: - resources: - VCPU: 1 - DISK_GB: 20 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 400 - response_strings: - - JSON does not validate - - does not match any of the regexes - -- name: put resource class not valid - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - vcpu: 1 - DISK_GB: 20 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 400 - response_strings: - - JSON does not validate - - does not match any of the regexes - -- name: put empty allocations - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: {} - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 400 - response_strings: - - JSON does not validate - - does not have enough properties - -- name: put unused field - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: - 
$ENVIRON['RP_UUID']: - resources: - VCPU: 1 - DISK_GB: 20 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - bad_field: moo - status: 400 - response_strings: - - JSON does not validate - -- name: create the resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - -- name: set some inventory - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 0 - inventories: - DISK_GB: - total: 2048 - min_unit: 10 - max_unit: 1024 - VCPU: - total: 96 - status: 200 - -- name: put an allocation dictish - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - VCPU: 1 - DISK_GB: 20 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 - -- name: get that allocation - GET: $LAST_URL - -- name: put that same allocation back - PUT: $LAST_URL - data: - # there's a generation in allocations, ignored - allocations: $RESPONSE['$.allocations'] - # project_id and user_id not in the get response so we add it - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml deleted file mode 100644 index b42d556d91db..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml +++ /dev/null @@ -1,152 +0,0 @@ -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - openstack-api-version: placement 1.8 - -tests: - -- name: put an allocation no project_id or user_id - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - status: 400 - response_strings: - - Failed validating 'required' in schema - -- name: put an allocation no project_id - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - Failed validating 'required' in schema - -- name: put an allocation no user_id - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - status: 400 - response_strings: - - Failed validating 'required' in schema - -- name: put an allocation project_id less than min length - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: "" - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - "Failed validating 'minLength'" - -- name: put an allocation user_id less than min length - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: 
- uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: "" - status: 400 - response_strings: - - "Failed validating 'minLength'" - -- name: put an allocation project_id exceeds max length - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: 78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1 - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - "Failed validating 'maxLength'" - -- name: put an allocation user_id exceeds max length - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: 78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1 - status: 400 - response_strings: - - "Failed validating 'maxLength'" - -- name: create the resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - -- name: post some inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - min_unit: 10 - max_unit: 1024 - status: 201 - -- name: put an allocation - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1.28.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-1.28.yaml deleted file mode 100644 index 77358836a560..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1.28.yaml +++ /dev/null @@ -1,255 +0,0 @@ -fixtures: - - AllocationFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement 1.28 -# -# Scenarios to test -# Start with no consumers -# old, no CG = success, consumer gets created -# new, no CG = fail, due to schema -# new, CG=None = success, consumer gets created -# new, CG= = fail -# Create an allocation, and with it, a consumer -# Now create another allocation -# old, no CG = success -# new, CG=None = fail -# new, CG !match = fail -# new, get CG from /allocations -# new, CG matches = success - -tests: - -- name: old version no gen no existing - PUT: /allocations/11111111-1111-1111-1111-111111111111 - request_headers: - openstack-api-version: placement 1.27 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 204 - -- 
name: new version no gen no existing - PUT: /allocations/22222222-2222-2222-2222-222222222222 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - JSON does not validate - -- name: new version gen is not null no existing - PUT: /allocations/22222222-2222-2222-2222-222222222222 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 5 - status: 409 - response_strings: - - consumer generation conflict - - expected null but got 5 - response_json_paths: - $.errors[0].code: placement.concurrent_update - -- name: new version gen is None no existing - PUT: /allocations/22222222-2222-2222-2222-222222222222 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - status: 204 - -- name: new version any gen no existing - PUT: /allocations/33333333-3333-3333-3333-333333333333 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 33 - status: 409 - response_strings: - - consumer generation conflict - -# Now create an allocation for a specific consumer -- name: put an allocation - PUT: /allocations/44444444-4444-4444-4444-444444444444 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - status: 204 - -- name: new version no gen existing - PUT: /allocations/44444444-4444-4444-4444-444444444444 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - status: 409 - response_strings: - - consumer generation conflict - -- name: get the current consumer generation - GET: /allocations/44444444-4444-4444-4444-444444444444 - status: 200 - -- name: new version matching gen existing - PUT: /allocations/44444444-4444-4444-4444-444444444444 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: $HISTORY["get the current consumer generation"].$RESPONSE["consumer_generation"] - status: 204 - -- name: new version mismatch gen existing - PUT: /allocations/44444444-4444-4444-4444-444444444444 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 12 - status: 409 - response_strings: - - consumer generation conflict - response_json_paths: - $.errors[0].code: placement.concurrent_update - -- name: old version no gen existing - PUT: /allocations/44444444-4444-4444-4444-444444444444 - request_headers: - openstack-api-version: placement 1.27 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 10 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 204 - -- name: new version serialization contains consumer generation - GET: /allocations/44444444-4444-4444-4444-444444444444 - status: 200 - response_json_paths: - $.consumer_generation: /^\d+$/ - -- name: empty allocations dict now possible in PUT /allocations/{consumer_uuid} - PUT: /allocations/44444444-4444-4444-4444-444444444444 - data: - 
allocations: {} - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: $HISTORY["new version serialization contains consumer generation"].$RESPONSE["consumer_generation"] - status: 204 - -- name: should now return no allocations for this consumer - GET: /allocations/44444444-4444-4444-4444-444444444444 - status: 200 - response_json_paths: - $.allocations.`len`: 0 - -# The following tests cover cases where we are putting allocations to -# multiple resource providers from one consumer uuid, both a brand new -# consumer and an existing one. - -- name: create shared disk - POST: /resource_providers - data: - name: shared_disker - uuid: 8aa83304-4b6d-4a23-b954-06d8b36b206a - -- name: trait that disk - PUT: /resource_providers/8aa83304-4b6d-4a23-b954-06d8b36b206a/traits - data: - resource_provider_generation: $RESPONSE['$.generation'] - traits: - - MISC_SHARES_VIA_AGGREGATE - - STORAGE_DISK_SSD - - -- name: set disk inventory - PUT: /resource_providers/8aa83304-4b6d-4a23-b954-06d8b36b206a/inventories - data: - inventories: - DISK_GB: - total: 5000 - resource_provider_generation: $RESPONSE['$.resource_provider_generation'] - -- name: disk in aggregate - PUT: /resource_providers/8aa83304-4b6d-4a23-b954-06d8b36b206a/aggregates - data: - resource_provider_generation: $RESPONSE['$.resource_provider_generation'] - aggregates: - - 7fade9e1-ab01-4d1b-84db-ac74f740bb42 - -- name: compute in aggregate - PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates - request_headers: - # avoid generation in aggregates - openstack-api-version: placement 1.10 - data: - - 7fade9e1-ab01-4d1b-84db-ac74f740bb42 - -- name: get candidates with shared - GET: /allocation_candidates?resources=VCPU:1,DISK_GB:200&required=STORAGE_DISK_SSD - response_json_paths: - $.allocation_requests.`len`: 1 - $.allocation_requests[0].allocations['$ENVIRON["RP_UUID"]'].resources.VCPU: 1 - $.allocation_requests[0].allocations['8aa83304-4b6d-4a23-b954-06d8b36b206a'].resources.DISK_GB: 200 - -- name: put that allocation to new consumer - PUT: /allocations/55555555-5555-5555-5555-555555555555 - data: - allocations: $RESPONSE['$.allocation_requests[0].allocations'] - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - status: 204 - -- name: put that allocation to existing consumer - PUT: /allocations/22222222-2222-2222-2222-222222222222 - data: - allocations: $HISTORY['get candidates with shared'].$RESPONSE['$.allocation_requests[0].allocations'] - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - # we just happen to know this is supposed to be 1 here, so shortcutting - consumer_generation: 1 - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1714072.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1714072.yaml deleted file mode 100644 index fa25935b6f43..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1714072.yaml +++ /dev/null @@ -1,97 +0,0 @@ -# Bug 1714072 describes a situation where a resource provider is present in the -# body of an allocation, but the resources object is empty. There should be at -# least one resource class and value pair. If there is not, a 400 response -# should be returned. 
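The rule these tests pin down is that every entry in the allocations object must name at least one resource class and amount. A hedged client-side sketch of the same constraint (the helper name is invented; the payload shape is the 1.12+ dict format also exercised below):

    def prune_empty_allocations(allocations):
        # Placement rejects an entry whose resources object is empty
        # ("does not have enough properties"), so drop such entries
        # before issuing the PUT rather than sending a doomed request.
        return {rp_uuid: alloc for rp_uuid, alloc in allocations.items()
                if alloc.get('resources')}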
- -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - # Default to <= 1.11 so the PUT /allocations requests in here that use the - # older list-ish format continue to work. - openstack-api-version: placement 1.11 - -tests: - -- name: create a resource provider - POST: /resource_providers - data: - name: an rp - status: 201 - -- name: get resource provider - GET: $LOCATION - status: 200 - -- name: add inventory to an rp - PUT: $RESPONSE['$.links[?rel = "inventories"].href'] - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 24 - MEMORY_MB: - total: 1024 - status: 200 - -- name: put a successful allocation - PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940 - data: - allocations: - - resource_provider: - uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid'] - resources: - VCPU: 1 - MEMORY_MB: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 - -- name: fail with empty resources - PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940 - data: - allocations: - - resource_provider: - uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid'] - resources: {} - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 400 - response_strings: - - does not have enough properties - -# The next two tests confirm that the bug identified by -# this file's name is not present in the PUT /allocations/{consumer_uuid} -# format added by microversion 1.12. - -- name: put a successful dictish allocation - PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940 - request_headers: - openstack-api-version: placement 1.12 - data: - allocations: - $HISTORY['get resource provider'].$RESPONSE['$.uuid']: - resources: - VCPU: 1 - MEMORY_MB: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 - -- name: fail with empty resources dictish - PUT: /allocations/c9f0186b-64f8-44fb-b6c9-83008d8d6940 - request_headers: - openstack-api-version: placement 1.12 - data: - allocations: - $HISTORY['get resource provider'].$RESPONSE['$.uuid']: - resources: {} - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 400 - response_strings: - - does not have enough properties diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778591.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778591.yaml deleted file mode 100644 index 0bd7fa38f499..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778591.yaml +++ /dev/null @@ -1,71 +0,0 @@ -# Demonstrate part of bug 1778591, where creating an allocation for a new -# consumer creates the consumer and its generation, but if the allocation -# fails, the subsequent request requires generation 0, not null, which is -# not what we expect. This is made more problematic in that we cannot query -# the generation when the consumer has no allocations. 
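The flow verified below, as a Python sketch using the requests library (the endpoint and identifiers are placeholders, not values from the fixture): because the failed PUT deletes the auto-created consumer, the retry must again send a null consumer_generation, not 0 or 1.

    import requests

    PLACEMENT = 'http://placement.example.com'  # hypothetical endpoint
    HEADERS = {'x-auth-token': 'admin',
               'openstack-api-version': 'placement 1.28',
               'accept': 'application/json'}
    CONSUMER = '88888888-8888-8888-8888-888888888888'
    RP_UUID = 'a-resource-provider-uuid'   # placeholder
    PROJECT_ID = 'a-project-uuid'          # placeholder
    USER_ID = 'a-user-uuid'                # placeholder

    def put_allocations(vcpus, generation):
        # generation must be JSON null (Python None) for a consumer
        # that placement has never successfully allocated for.
        payload = {
            'allocations': {RP_UUID: {'resources': {'VCPU': vcpus}}},
            'project_id': PROJECT_ID,
            'user_id': USER_ID,
            'consumer_generation': generation,
        }
        return requests.put(PLACEMENT + '/allocations/' + CONSUMER,
                            json=payload, headers=HEADERS)

    resp = put_allocations(9999, None)      # exceeds capacity -> 409
    if resp.status_code == 409:
        # The failure removed the auto-created consumer, so this is
        # still a "new" consumer: the generation stays None.
        resp = put_allocations(1, None)     # -> 204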
- - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - # consumer generations were added in 1.28 - openstack-api-version: placement 1.28 - content-type: application/json - accept: application/json - -tests: -# create a simple resource provider with limited inventory - -- name: create provider - POST: /resource_providers - data: - name: simple - uuid: $ENVIRON['RP_UUID'] - -- name: set inventory - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 4 - -- name: fail allocations new consumer, bad capacity - PUT: /allocations/88888888-8888-8888-8888-888888888888 - data: - allocations: - "$ENVIRON['RP_UUID']": - resources: - VCPU: 9999 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - status: 409 - response_strings: - - The requested amount would exceed the capacity - -- name: try to get consumer generation - desc: when there are no allocations we can't see the generation of a consumer - GET: /allocations/88888888-8888-8888-8888-888888888888 - response_json_paths: - # check entire response - $: - allocations: {} - -# The failure to allocate above should have deleted the auto-created consumer, -# so when we retry the allocation here, we should be able to use the -# appropriate null generation to indicate this is a new consumer -- name: retry allocations new consumer, still null gen - PUT: /allocations/88888888-8888-8888-8888-888888888888 - data: - allocations: - "$ENVIRON['RP_UUID']": - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778743.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778743.yaml deleted file mode 100644 index 17bb002bf6ec..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1778743.yaml +++ /dev/null @@ -1,70 +0,0 @@ -# Test to see if capacity check in POST allocations works as expected. -# It did not, due to bug 1778743, but it is now fixed. - - -fixtures: - - APIFixture - -defaults: - request_headers: - # 1.28 provides consumer generation in allocations - openstack-api-version: placement 1.28 - x-auth-token: admin - content-type: application/json - accept: application/json - -tests: - -- name: create an rp - POST: /resource_providers - data: - uuid: 4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55 - name: rp1 - -- name: add vcpu inventory - PUT: /resource_providers/4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55/inventories - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 2 - -- name: post multiple allocations - desc: this should 409 because we're allocating 3 VCPU! 
- POST: /allocations - data: - a6ace019-f230-4dcc-8a76-36d27b9c2257: - allocations: - 4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55: - resources: - VCPU: 1 - project_id: a2cec092-0f67-42ed-b870-f3925cc5c6d4 - user_id: d28385b2-7860-4055-b32d-4cd1057cd5f2 - consumer_generation: null - 2e613d4f-f5b2-4956-bd61-ea5be6600f80: - allocations: - 4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55: - resources: - VCPU: 1 - project_id: a2cec092-0f67-42ed-b870-f3925cc5c6d4 - user_id: d28385b2-7860-4055-b32d-4cd1057cd5f2 - consumer_generation: null - 2b3abca1-b72b-4817-9217-397f19b52c92: - allocations: - 4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55: - resources: - VCPU: 1 - project_id: a2cec092-0f67-42ed-b870-f3925cc5c6d4 - user_id: d28385b2-7860-4055-b32d-4cd1057cd5f2 - consumer_generation: null - status: 409 - -- name: check usage - GET: /resource_providers/4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55/usages - response_json_paths: - $.usages.VCPU: 0 - -- name: check inventory - GET: /resource_providers/4e05a85b-e8a6-4b3a-82c1-5f6ad3f71d55/inventories - response_json_paths: - $.inventories.VCPU.total: 2 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1779717.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1779717.yaml deleted file mode 100644 index 13de8aa41dce..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-bug-1779717.yaml +++ /dev/null @@ -1,102 +0,0 @@ -# Test that it's possible to change the project or user identifier for a -# consumer by specifying a different project_id or user_id value in the payload -# of either a PUT /allocations/{consumer_uuid} or a POST /allocations
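The read-modify-write cycle these tests exercise looks roughly like this from a client's point of view (a sketch with the requests library; the endpoint and the replacement project id are placeholders):

    import requests

    PLACEMENT = 'http://placement.example.com'  # hypothetical endpoint
    HEADERS = {'x-auth-token': 'admin',
               'openstack-api-version': 'placement 1.28',
               'accept': 'application/json'}
    CONSUMER = '11111111-1111-1111-1111-111111111111'
    NEW_PROJECT_ID = 'the-replacement-project-uuid'  # placeholder

    # Read the current allocations and consumer generation.
    current = requests.get(PLACEMENT + '/allocations/' + CONSUMER,
                           headers=HEADERS).json()

    # Write the same resources back under the new project, echoing the
    # generation just read so that a concurrent writer produces a 409
    # instead of a silent overwrite.
    payload = {
        'allocations': {rp: {'resources': alloc['resources']}
                        for rp, alloc in current['allocations'].items()},
        'project_id': NEW_PROJECT_ID,
        'user_id': current['user_id'],
        'consumer_generation': current['consumer_generation'],
    }
    resp = requests.put(PLACEMENT + '/allocations/' + CONSUMER,
                        json=payload, headers=HEADERS)
    assert resp.status_code == 204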
user_id: $ENVIRON['USER_ID'] - 22222222-2222-2222-2222-222222222222: - allocations: - $HISTORY['create cn1'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 1 - consumer_generation: null - project_id: $ENVIRON['PROJECT_ID_ALT'] - user_id: $ENVIRON['USER_ID_ALT'] - status: 204 - -- name: check consumer1's project is back to the original project - GET: /allocations/11111111-1111-1111-1111-111111111111 - status: 200 - response_json_paths: - $.project_id: $ENVIRON['PROJECT_ID'] - $.user_id: $ENVIRON['USER_ID'] diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-policy.yaml deleted file mode 100644 index ebee30cb6150..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-policy.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# This tests the individual CRUD operations on -# /allocations* and /resource_providers/{uuid}/allocations using a non-admin -# user with an open policy configuration. The response validation is -# intentionally minimal. -fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -tests: - -- name: create resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - -- name: set some inventory - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 0 - inventories: - DISK_GB: - total: 2048 - min_unit: 10 - max_unit: 1024 - VCPU: - total: 96 - status: 200 - -- name: create allocation for consumer - PUT: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - data: - allocations: - $ENVIRON['RP_UUID']: - resources: - VCPU: 1 - DISK_GB: 20 - consumer_generation: null - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 - -- name: list allocations for consumer - GET: $LAST_URL - -- name: list allocations for resource provider - GET: /resource_providers/$ENVIRON['RP_UUID']/allocations - -- name: manage allocations - POST: /allocations - data: - a0b15655-273a-4b3d-9792-2e579b7d5ad9: - consumer_generation: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - allocations: - $ENVIRON['RP_UUID']: - resources: - VCPU: 8 - DISK_GB: 40 - status: 204 - -- name: delete allocation for consumer - DELETE: /allocations/a0b15655-273a-4b3d-9792-2e579b7d5ad9 - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-post.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-post.yaml deleted file mode 100644 index 6b844477010f..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations-post.yaml +++ /dev/null @@ -1,399 +0,0 @@ -# Test that it is possible to POST multiple allocations to /allocations to -# simultaneously make changes, including removing resources for a consumer if -# the allocations are empty. 
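The "clear and set" pattern at the heart of this file, shown as the single request a client might make during a migration-style move (a sketch; all identifiers are placeholders, and POST /allocations needs microversion 1.13 or later, as the 404 test below confirms):

    import requests

    PLACEMENT = 'http://placement.example.com'  # hypothetical endpoint
    HEADERS = {'x-auth-token': 'admin',
               'openstack-api-version': 'placement 1.13',
               'accept': 'application/json'}
    INSTANCE = 'instance-consumer-uuid'      # placeholder
    MIGRATION = 'migration-consumer-uuid'    # placeholder
    COMPUTE_RP = 'target-compute-rp-uuid'    # placeholder
    PROJECT_ID, USER_ID = 'project-uuid', 'user-uuid'

    payload = {
        INSTANCE: {
            'allocations': {
                COMPUTE_RP: {'resources': {'VCPU': 2, 'MEMORY_MB': 1024}},
            },
            'project_id': PROJECT_ID,
            'user_id': USER_ID,
        },
        MIGRATION: {
            # An empty allocations object removes this consumer's
            # usage; both consumers change in one atomic request.
            'allocations': {},
            'project_id': PROJECT_ID,
            'user_id': USER_ID,
        },
    }
    resp = requests.post(PLACEMENT + '/allocations',
                         json=payload, headers=HEADERS)
    assert resp.status_code == 204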
- -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement 1.13 - -tests: - -- name: create compute one - POST: /resource_providers - data: - name: compute01 - status: 201 - -- name: rp compute01 - desc: provide a reference for later reuse - GET: $LOCATION - -- name: create compute two - POST: /resource_providers - data: - name: compute02 - status: 201 - -- name: rp compute02 - desc: provide a reference for later reuse - GET: $LOCATION - -- name: create shared disk - POST: /resource_providers - data: - name: storage01 - status: 201 - -- name: rp storage01 - desc: provide a reference for later reuse - GET: $LOCATION - -- name: inventory compute01 - PUT: $HISTORY['rp compute01'].$RESPONSE['links[?rel = "inventories"].href'] - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 16 - MEMORY_MB: - total: 2048 - -- name: inventory compute02 - PUT: $HISTORY['rp compute02'].$RESPONSE['links[?rel = "inventories"].href'] - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 16 - MEMORY_MB: - total: 2048 - -- name: inventory storage01 - PUT: $HISTORY['rp storage01'].$RESPONSE['links[?rel = "inventories"].href'] - data: - resource_provider_generation: 0 - inventories: - DISK_GB: - total: 4096 - -- name: confirm only POST - GET: /allocations - status: 405 - response_headers: - allow: POST - -- name: 404 on older 1.12 microversion post - POST: /allocations - request_headers: - openstack-api-version: placement 1.12 - status: 404 - -- name: post allocations two consumers - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - $ENVIRON['MIGRATION_UUID']: - allocations: - $HISTORY['rp compute01'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 204 - -- name: get allocations for instance consumer - GET: /allocations/$ENVIRON['INSTANCE_UUID'] - request_headers: - # We want to inspect the consumer generations... - openstack-api-version: placement 1.28 - response_json_paths: - $.allocations["$HISTORY['rp compute02'].$RESPONSE['uuid']"].resources[MEMORY_MB]: 1024 - $.allocations["$HISTORY['rp compute02'].$RESPONSE['uuid']"].resources[VCPU]: 2 - $.allocations["$HISTORY['rp storage01'].$RESPONSE['uuid']"].resources[DISK_GB]: 5 - $.consumer_generation: 1 - $.project_id: $ENVIRON['PROJECT_ID'] - $.user_id: $ENVIRON['USER_ID'] - -- name: get allocations for migration consumer - GET: /allocations/$ENVIRON['MIGRATION_UUID'] - request_headers: - # We want to inspect the consumer generations... 
- openstack-api-version: placement 1.28 - response_json_paths: - $.allocations["$HISTORY['rp compute01'].$RESPONSE['uuid']"].resources[MEMORY_MB]: 1024 - $.allocations["$HISTORY['rp compute01'].$RESPONSE['uuid']"].resources[VCPU]: 2 - $.consumer_generation: 1 - $.project_id: $ENVIRON['PROJECT_ID'] - $.user_id: $ENVIRON['USER_ID'] - -- name: confirm usages - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - response_json_paths: - $.usages.DISK_GB: 5 - $.usages.VCPU: 4 - $.usages.MEMORY_MB: 2048 - -- name: clear and set allocations - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - $ENVIRON['MIGRATION_UUID']: - allocations: {} - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 204 - -- name: confirm usages after clear - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - response_json_paths: - $.usages.DISK_GB: 5 - $.usages.VCPU: 2 - $.usages.MEMORY_MB: 1024 - -- name: post allocations two users - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - # We must use a fresh consumer id with the alternate project id info. - # A previously seen consumer id will be assumed to always have the same - # project and user. - $ENVIRON['CONSUMER_UUID']: - allocations: - $HISTORY['rp compute01'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - project_id: $ENVIRON['PROJECT_ID_ALT'] - user_id: $ENVIRON['USER_ID_ALT'] - status: 204 - -- name: confirm usages user a - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - response_json_paths: - $.usages.`len`: 3 - $.usages.DISK_GB: 5 - $.usages.VCPU: 2 - $.usages.MEMORY_MB: 1024 - -- name: confirm usages user b - GET: /usages?project_id=$ENVIRON['PROJECT_ID_ALT'] - response_json_paths: - $.usages.`len`: 2 - $.usages.VCPU: 2 - $.usages.MEMORY_MB: 1024 - -- name: fail allocations over capacity - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - $ENVIRON['CONSUMER_UUID']: - allocations: - $HISTORY['rp compute01'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 2049 - VCPU: 2 - project_id: $ENVIRON['PROJECT_ID_ALT'] - user_id: $ENVIRON['USER_ID_ALT'] - status: 409 - response_strings: - - The requested amount would exceed the capacity - -- name: fail allocations deep schema violate - desc: no schema yet - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - cow: moo - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 400 - -- name: fail allocations shallow schema violate - desc: no schema yet - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - cow: moo - status: 400 - -- name: fail resource provider not exist - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - # this rp does not exist - 'c42def7b-498b-4442-9502-c7970b14bea4': - resources: - MEMORY_MB: 1024 - VCPU: 2 - $HISTORY['rp 
storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - that does not exist - -- name: fail resource class not in inventory - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - PCI_DEVICE: 1 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 409 - response_strings: - - "Inventory for 'PCI_DEVICE' on" - -- name: fail resource class not exist - POST: /allocations - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - CUSTOM_PONY: 1 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 400 - response_strings: - - No such resource class CUSTOM_PONY - -- name: fail missing consumer generation >= 1.28 - POST: /allocations - request_headers: - openstack-api-version: placement 1.28 - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 2 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 5 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - $ENVIRON['CONSUMER_UUID']: - allocations: - $HISTORY['rp compute01'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 2049 - VCPU: 2 - project_id: $ENVIRON['PROJECT_ID_ALT'] - user_id: $ENVIRON['USER_ID_ALT'] - status: 400 - response_strings: - - JSON does not validate - -- name: fail incorrect consumer generation >= 1.28 - POST: /allocations - request_headers: - openstack-api-version: placement 1.28 - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 1 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 4 - consumer_generation: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - $ENVIRON['CONSUMER_UUID']: - allocations: - $HISTORY['rp compute01'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 1 - consumer_generation: 1 - project_id: $ENVIRON['PROJECT_ID_ALT'] - user_id: $ENVIRON['USER_ID_ALT'] - status: 409 - response_strings: - - consumer generation conflict - expected 3 but got 1 - -- name: change allocations for existing providers >= 1.28 - POST: /allocations - request_headers: - openstack-api-version: placement 1.28 - data: - $ENVIRON['INSTANCE_UUID']: - allocations: - $HISTORY['rp compute02'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 1 - $HISTORY['rp storage01'].$RESPONSE['uuid']: - resources: - DISK_GB: 4 - consumer_generation: 3 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - $ENVIRON['CONSUMER_UUID']: - allocations: - $HISTORY['rp compute01'].$RESPONSE['uuid']: - resources: - MEMORY_MB: 1024 - VCPU: 1 - consumer_generation: 1 - project_id: $ENVIRON['PROJECT_ID_ALT'] - user_id: $ENVIRON['USER_ID_ALT'] - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations.yaml deleted file mode 100644 index 726c211c15a0..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/allocations.yaml +++ /dev/null @@ -1,509 +0,0 @@ -# Tests of allocations API -# -# Note(cdent): 
Consumer ids are not validated against anything to -# confirm that they are associated with anything real. This is -# by design. - - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - -tests: - -- name: get allocations no consumer is 405 - GET: /allocations - status: 405 - response_json_paths: - $.errors[0].title: Method Not Allowed - -- name: get allocations is empty dict - GET: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - response_json_paths: - $.allocations: {} - -- name: put an allocation no resource provider - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resources: - DISK_GB: 10 - status: 400 - response_json_paths: - $.errors[0].title: Bad Request - -- name: create the resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - -- name: put an allocation no data - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - status: 400 - response_json_paths: - $.errors[0].title: Bad Request - -- name: put an allocation empty list - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: [] - status: 400 - response_strings: - - "Failed validating 'minItems'" - -- name: put an allocation violate schema - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - cow: 10 - status: 400 - response_json_paths: - $.errors[0].title: Bad Request - -- name: put an allocation no inventory - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - status: 409 - response_json_paths: - $.errors[0].title: Conflict - -- name: post some inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - min_unit: 10 - max_unit: 1024 - status: 201 - -- name: put an allocation with zero usage - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 0 - status: 400 - response_strings: - - "JSON does not validate: 0 is less than the minimum of 1" - - Failed validating 'minimum' in schema - -- name: put an allocation with omitted usage - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - status: 400 - response_strings: - - Failed validating 'required' in schema - - -- name: put an allocation - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - status: 204 - -- name: fail to delete that provider - DELETE: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - # we need this microversion to get error codes in the response - openstack-api-version: placement 1.23 - 
status: 409 - response_strings: - - "Unable to delete resource provider $ENVIRON['RP_UUID']" - response_json_paths: - errors[0].code: placement.resource_provider.inuse - - -- name: put an allocation different consumer - PUT: /allocations/39715579-2167-4c63-8247-301311cc6703 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - status: 204 - -- name: check usages after another 10 - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_json_paths: - $.usages.DISK_GB: 20 - -# NOTE(cdent): Contravening the spec, we decided that it is -# important to be able to update an existing allocation, so this -# should work but it is important to check the usage. -- name: put allocation again - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 12 - status: 204 - -- name: check usages after 12 - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_json_paths: - $.usages.DISK_GB: 22 - -- name: put allocation bad resource class - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - COWS: 12 - status: 400 - response_strings: - - Unable to allocate inventory for consumer - - No such resource class COWS - response_json_paths: - $.errors[0].title: Bad Request - -- name: delete allocation - DELETE: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - status: 204 - -- name: delete allocation again - DELETE: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - status: 404 - response_strings: - - No allocations for consumer '599ffd2d-526a-4b2e-8683-f13ad25f9958' - response_json_paths: - $.errors[0].title: Not Found - -- name: delete allocation of unknown consumer id - DELETE: /allocations/da78521f-bf7e-4e6e-9901-3f79bd94d55d - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: redo an allocation - PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - status: 204 - -- name: add other inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: VCPU - total: 32 - min_unit: 1 - max_unit: 8 - status: 201 - -- name: multiple allocations - PUT: /allocations/833f0885-f78c-4788-bb2b-3607b0656be7 - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 20 - VCPU: 4 - status: 204 - -- name: check usages - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_json_paths: - $.resource_provider_generation: 7 - $.usages.DISK_GB: 40 - -- name: check allocations for the resource provider - GET: /resource_providers/$ENVIRON['RP_UUID']/allocations - response_json_paths: - $.resource_provider_generation: 7 - # allocations are keyed by consumer id, jsonpath-rw needs us - # to quote the uuids or its parser gets confused that maybe - # they are numbers on which math needs to be done. 
- $.allocations['833f0885-f78c-4788-bb2b-3607b0656be7'].resources.DISK_GB: 20 - $.allocations['833f0885-f78c-4788-bb2b-3607b0656be7'].resources.VCPU: 4 - $.allocations['599ffd2d-526a-4b2e-8683-f13ad25f9958'].resources.DISK_GB: 10 - $.allocations['39715579-2167-4c63-8247-301311cc6703'].resources.DISK_GB: 10 - -- name: confirm 404 for allocations of bad resource provider - GET: /resource_providers/cb8a3007-b93a-471f-9e1f-4d58355678bd/allocations - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: check allocations by consumer id - GET: /allocations/833f0885-f78c-4788-bb2b-3607b0656be7 - response_json_paths: - $.allocations["$ENVIRON['RP_UUID']"].generation: 7 - $.allocations["$ENVIRON['RP_UUID']"].resources.DISK_GB: 20 - $.allocations["$ENVIRON['RP_UUID']"].resources.VCPU: 4 - -- name: check allocations by different consumer id - GET: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958 - response_json_paths: - $.allocations["$ENVIRON['RP_UUID']"].generation: 7 - $.allocations["$ENVIRON['RP_UUID']"].resources.DISK_GB: 10 - -# create another two resource providers to test retrieving -# allocations -- name: create resource provider 1 - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: rp1 - uuid: 9229b2fc-d556-4e38-9c18-443e4bc6ceae - status: 201 - -- name: create resource provider 2 - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: rp2 - uuid: fcfa516a-abbe-45d1-8152-d5225d82e596 - status: 201 - -- name: set inventory on rp1 - PUT: /resource_providers/9229b2fc-d556-4e38-9c18-443e4bc6ceae/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 32 - max_unit: 32 - DISK_GB: - total: 10 - max_unit: 10 - -- name: set inventory on rp2 - PUT: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 16 - max_unit: 16 - DISK_GB: - total: 20 - max_unit: 20 - status: 200 - -- name: put allocations on both those providers one - PUT: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: fcfa516a-abbe-45d1-8152-d5225d82e596 - resources: - DISK_GB: 10 - VCPU: 8 - - resource_provider: - uuid: 9229b2fc-d556-4e38-9c18-443e4bc6ceae - resources: - DISK_GB: 5 - VCPU: 16 - status: 204 - -- name: put allocations on both those providers two - PUT: /allocations/75d0f5f7-75d9-458c-b204-f90ac91604ec - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: fcfa516a-abbe-45d1-8152-d5225d82e596 - resources: - DISK_GB: 5 - VCPU: 4 - - resource_provider: - uuid: 9229b2fc-d556-4e38-9c18-443e4bc6ceae - resources: - DISK_GB: 2 - VCPU: 8 - status: 204 - # These headers should not be present in any microversion on PUT - # because there is no response body. 
- response_forbidden_headers: - - cache-control - - last-modified - -- name: get those allocations for consumer - GET: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b - response_json_paths: - $.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].generation: 3 - $.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.DISK_GB: 10 - $.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.VCPU: 8 - $.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].generation: 3 - $.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.DISK_GB: 5 - $.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.VCPU: 16 - -- name: get those allocations for resource provider - GET: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/allocations - response_json_paths: - $.resource_provider_generation: 3 - $.allocations.['75d0f5f7-75d9-458c-b204-f90ac91604ec'].resources.DISK_GB: 5 - $.allocations.['75d0f5f7-75d9-458c-b204-f90ac91604ec'].resources.VCPU: 4 - $.allocations.['1835b1c9-1c61-45af-9eb3-3e0e9f29487b'].resources.DISK_GB: 10 - $.allocations.['1835b1c9-1c61-45af-9eb3-3e0e9f29487b'].resources.VCPU: 8 - -- name: put allocations on existing consumer with dashless UUID - PUT: /allocations/75d0f5f775d9458cb204f90ac91604ec - request_headers: - content-type: application/json - # Consumer generation - openstack-api-version: placement 1.28 - data: - allocations: - fcfa516a-abbe-45d1-8152-d5225d82e596: - resources: - DISK_GB: 1 - VCPU: 1 - 9229b2fc-d556-4e38-9c18-443e4bc6ceae: - resources: - DISK_GB: 1 - VCPU: 1 - consumer_generation: 1 - project_id: 00000000-0000-0000-0000-000000000000 - user_id: 00000000-0000-0000-0000-000000000000 - status: 204 - -- name: get allocations on existing consumer with dashed UUID - GET: /allocations/75d0f5f7-75d9-458c-b204-f90ac91604ec - response_json_paths: - $.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].generation: 4 - $.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.DISK_GB: 1 - $.allocations.['fcfa516a-abbe-45d1-8152-d5225d82e596'].resources.VCPU: 1 - $.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].generation: 4 - $.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.DISK_GB: 1 - $.allocations.['9229b2fc-d556-4e38-9c18-443e4bc6ceae'].resources.VCPU: 1 - -- name: put an allocation for a not existing resource provider - PUT: /allocations/75d0f5f7-75d9-458c-b204-f90ac91604ec - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: be8b9cba-e7db-4a12-a386-99b4242167fe - resources: - DISK_GB: 5 - VCPU: 4 - status: 400 - response_strings: - - Allocation for resource provider 'be8b9cba-e7db-4a12-a386-99b4242167fe' that does not exist - response_json_paths: - $.errors[0].title: Bad Request - -- name: get allocations for resource provider with cache headers 1.15 - GET: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/allocations - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: get allocations for resource provider without cache headers 1.14 - GET: /resource_providers/fcfa516a-abbe-45d1-8152-d5225d82e596/allocations - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - cache-control - - last-modified - -- name: get allocations for consumer with cache headers 1.15 - GET: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: get allocations for consumer without cache headers 1.14 - GET: /allocations/1835b1c9-1c61-45af-9eb3-3e0e9f29487b - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - cache-control - - last-modified - -- name: creating allocation with a non UUID consumer fails - PUT: /allocations/not-a-uuid - request_headers: - content-type: application/json - data: - allocations: - - resource_provider: - uuid: fcfa516a-abbe-45d1-8152-d5225d82e596 - resources: - DISK_GB: 1 - VCPU: 1 - status: 400 - response_strings: - - Malformed consumer_uuid diff --git a/nova/tests/functional/api/openstack/placement/gabbits/basic-http.yaml b/nova/tests/functional/api/openstack/placement/gabbits/basic-http.yaml deleted file mode 100644 index 584d1eeb7148..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/basic-http.yaml +++ /dev/null @@ -1,207 +0,0 @@ -# -# Test the basic handling of HTTP (expected response codes and the -# like). -# - -fixtures: - - APIFixture - -defaults: - request_headers: - # NOTE(cdent): Get past keystone, even though at this stage - # we don't require auth. - x-auth-token: admin - accept: application/json - -tests: -- name: 404 at no service - GET: /barnabas - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: error message has request id - GET: /barnabas - status: 404 - response_json_paths: - $.errors[0].request_id: /req-[a-fA-F0-9-]+/ - -- name: error message has default code 1.23 - GET: /barnabas - status: 404 - request_headers: - openstack-api-version: placement 1.23 - response_json_paths: - $.errors[0].code: placement.undefined_code - -- name: 404 at no resource provider - GET: /resource_providers/fd0dd55c-6330-463b-876c-31c54e95cb95 - status: 404 - -- name: 405 on bad method at root - DELETE: / - status: 405 - response_headers: - allow: GET - response_json_paths: - $.errors[0].title: Method Not Allowed - -- name: 200 at home - GET: / - status: 200 - -- name: 405 on bad method on app - DELETE: /resource_providers - status: 405 - response_headers: - allow: /(GET|POST), (POST|GET)/ - response_json_paths: - $.errors[0].title: Method Not Allowed - response_strings: - - The method DELETE is not allowed for this resource. - -- name: 405 on bad options method on app - OPTIONS: /resource_providers - status: 405 - response_headers: - allow: /(GET|POST), (POST|GET)/ - response_json_paths: - $.errors[0].title: Method Not Allowed - response_strings: - - The method OPTIONS is not allowed for this resource. 
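The error bodies asserted throughout this file share one JSON shape: an errors list whose entries carry title and status, a request_id, and, from microversion 1.23 on, a stable code. A sketch of a client helper that surfaces them (the function name is invented; resp is a requests Response):

    def raise_for_placement_error(resp):
        # Pull the first structured error out of a placement failure
        # response and raise something more useful than a bare status.
        if resp.status_code < 400:
            return
        try:
            error = resp.json()['errors'][0]
        except (ValueError, KeyError, IndexError):
            resp.raise_for_status()  # non-JSON error body; fall back
            return
        raise RuntimeError('%s (%s): %s' % (
            error.get('title'),
            error.get('code', 'placement.undefined_code'),
            error.get('request_id')))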
- -- name: bad accept resource providers - GET: /resource_providers - request_headers: - accept: text/plain - status: 406 - -- name: complex accept resource providers - GET: /resource_providers - request_headers: - accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 - status: 200 - response_json_paths: - $.resource_providers: [] - -- name: post resource provider wrong content-type - POST: /resource_providers - request_headers: - content-type: text/plain - data: I want a resource provider please - status: 415 - -- name: post resource provider missing content-type - desc: because content-length is set, we should have a content-type - POST: /resource_providers - data: I want a resource provider please - status: 400 - response_strings: - - content-type header required - -# NOTE(cdent): This is an awkward test. It is not actually testing a -# PUT of a resource provider. It is confirming that a PUT with no -# body, no content-length header and no content-type header will -# reach the desired handler. -- name: PUT resource provider no body - desc: different response string from prior test indicates past content-length requirement - PUT: /resource_providers/d3a64825-8228-4ccb-8a6c-1c6d3eb6a3e8 - status: 415 - response_strings: - - The media type None is not supported, use application/json - -- name: post resource provider schema mismatch - POST: /resource_providers - request_headers: - content-type: application/json - data: - transport: car - color: blue - status: 400 - -- name: post good resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - -- name: get resource provider wrong accept - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - accept: text/plain - status: 406 - response_strings: - - Only application/json is provided - -- name: get resource provider complex accept wild match - desc: like a browser, */* should match - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8 - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - -- name: get resource provider complex accept no match - desc: no */*, no match - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - accept: text/html,application/xhtml+xml,application/xml;q=0.9 - status: 406 - -- name: put poor format resource provider - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: text/plain - data: Why U no provide? - status: 415 - -- name: non inventory sub resource provider path - GET: /resource_providers/7850178f-1807-4512-b135-0b174985405b/cows - request_headers: - accept: application/json - status: 404 - response_json_paths: - $.errors[0].title: Not Found - response_strings: - - The resource could not be found. - -- name: root at 1.15 has cache headers - GET: / - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: root at 1.14 no cache headers - GET: / - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - last-modified - - cache-control - -- name: test starred accept and errors - GET: /resource_providers/foo - request_headers: - accept: "*/*" - status: 404 - response_headers: - content-type: application/json - response_json_paths: - $.errors[0].title: Not Found - -- name: bad content length not int - POST: /resource_providers - request_headers: - content-type: application/json - content-length: hi mom - data: - uuid: ce13d7f1-9988-4dfd-8e16-ce071802eb36 - status: 400 - response_strings: - - content-length header must be an integer diff --git a/nova/tests/functional/api/openstack/placement/gabbits/bug-1674694.yaml b/nova/tests/functional/api/openstack/placement/gabbits/bug-1674694.yaml deleted file mode 100644 index 609f015740a9..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/bug-1674694.yaml +++ /dev/null @@ -1,38 +0,0 @@ -# Test launchpad bug https://bugs.launchpad.net/nova/+bug/1674694 - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - -tests: - -- name: 404 with application/json - GET: /bc8d9d50-7b0d-45ef-839c-e7b5e1c4e8fd - request_headers: - accept: application/json - status: 404 - response_headers: - content-type: application/json - response_json_paths: - $.errors[0].status: 404 - -- name: 404 with no accept - GET: /bc8d9d50-7b0d-45ef-839c-e7b5e1c4e8fd - status: 404 - response_headers: - content-type: application/json - response_json_paths: - $.errors[0].status: 404 - -- name: 404 with other accept - GET: /bc8d9d50-7b0d-45ef-839c-e7b5e1c4e8fd - status: 404 - request_headers: - accept: text/html - response_headers: - content-type: /text/html/ - response_strings: - - The resource could not be found diff --git a/nova/tests/functional/api/openstack/placement/gabbits/confirm-auth.yaml b/nova/tests/functional/api/openstack/placement/gabbits/confirm-auth.yaml deleted file mode 100644 index d4e17f6ef391..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/confirm-auth.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# -# Confirm that the noauth handler is causing a 401 when no fake -# token is provided. -# - -fixtures: - - APIFixture - -defaults: - request_headers: - accept: application/json - -tests: - - name: no token gets 200 at root - GET: / - status: 200 - - - name: with token 200 at root - GET: / - request_headers: - x-auth-token: admin:admin - status: 200 - - - name: no token gets 401 - GET: /resource_providers - status: 401 - - - name: with token 200 - GET: /resource_providers - request_headers: - x-auth-token: admin:admin - status: 200 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/cors.yaml b/nova/tests/functional/api/openstack/placement/gabbits/cors.yaml deleted file mode 100644 index 291e1d5f7085..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/cors.yaml +++ /dev/null @@ -1,47 +0,0 @@ -# Confirm that CORS is present. No complex configuration is done so -# this just tests the basics. Borrowed, in spirit, from -# nova.tests.functional.test_middleware. 
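What these tests exercise, expressed as the preflight request a browser would send (a sketch; the endpoint is a placeholder, and which origins are echoed back depends on the CORS middleware's configured allowed origins, here supplied by the fixture):

    import requests

    PLACEMENT = 'http://placement.example.com'  # hypothetical endpoint

    resp = requests.options(PLACEMENT + '/', headers={
        'x-auth-token': 'user',
        'origin': 'http://valid.example.com',
        'access-control-request-method': 'GET',
    })
    # For an allowed origin the middleware echoes it back; for any
    # other origin the header is simply absent (the status is 200
    # either way).
    print(resp.headers.get('access-control-allow-origin'))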
- -fixtures: - - CORSFixture - -defaults: - request_headers: - x-auth-token: user - -tests: -- name: valid options request - OPTIONS: / - request_headers: - origin: http://valid.example.com - access-control-request-method: GET - status: 200 - response_headers: - access-control-allow-origin: http://valid.example.com - -- name: invalid options request - OPTIONS: / - request_headers: - origin: http://invalid.example.com - access-control-request-method: GET - status: 200 - response_forbidden_headers: - - access-control-allow-origin - -- name: valid get request - GET: / - request_headers: - origin: http://valid.example.com - access-control-request-method: GET - status: 200 - response_headers: - access-control-allow-origin: http://valid.example.com - -- name: invalid get request - GET: / - request_headers: - origin: http://invalid.example.com - access-control-request-method: GET - status: 200 - response_forbidden_headers: - - access-control-allow-origin diff --git a/nova/tests/functional/api/openstack/placement/gabbits/ensure-consumer.yaml b/nova/tests/functional/api/openstack/placement/gabbits/ensure-consumer.yaml deleted file mode 100644 index 512b3deb6f80..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/ensure-consumer.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Tests of the ensure consumer behaviour for versions of the API before 1.8; -# starting with 1.8, project_id and user_id are required by the -# PUT: /allocations/{consumer_uuid} API. -fixtures: - - AllocationFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - openstack-api-version: placement 1.7 - -vars: -- &default_incomplete_id 00000000-0000-0000-0000-000000000000 -tests: - -- name: put an allocation without project/user (1.7) - PUT: /allocations/$ENVIRON['CONSUMER_UUID'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.7 - data: - allocations: - - resource_provider: - uuid: $ENVIRON['RP_UUID'] - resources: - DISK_GB: 10 - status: 204 - -# We now ALWAYS create a consumer record, and if project or user isn't -# specified (as was the case in 1.7) we should get the project/user -# corresponding to the CONF option for incomplete consumers when asking for the -# allocation information at a microversion that shows project/user information -# (1.12+) -- name: get with 1.12 microversion and check project and user are filled - GET: /allocations/$ENVIRON['CONSUMER_UUID'] - request_headers: - openstack-api-version: placement 1.12 - response_json_paths: - $.project_id: *default_incomplete_id - $.user_id: *default_incomplete_id diff --git a/nova/tests/functional/api/openstack/placement/gabbits/granular.yaml b/nova/tests/functional/api/openstack/placement/gabbits/granular.yaml deleted file mode 100644 index dc5f9f74c258..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/granular.yaml +++ /dev/null @@ -1,474 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
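A note on the query syntax exercised by the granular tests that follow: numbered suffixes (`resources1`, `required2`, `member_of2`) partition the request into separate groups, and `group_policy` decides whether distinct groups may be satisfied by the same provider. A sketch of assembling such a query using only the standard library (the parameter values are illustrative, not taken from the fixture):

```python
# Illustrative assembly of a granular /allocation_candidates query string.
from urllib.parse import urlencode

params = {
    'resources1': 'VCPU:1,MEMORY_MB:1024',  # group 1: compute resources
    'resources2': 'DISK_GB:100',            # group 2: disk, possibly shared
    'required2': 'CUSTOM_DISK_SSD',         # trait required of group 2's provider
    'group_policy': 'isolate',              # groups must use different providers
}
print('/allocation_candidates?' + urlencode(params))
```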
- -# Tests for granular resource requests - -fixtures: - # See the layout diagram in this fixture's docstring in ../fixtures.py - - GranularFixture - -defaults: - request_headers: - x-auth-token: admin - content-type: application/json - accept: application/json - openstack-api-version: placement 1.25 - -tests: - -- name: different groups hit with group_policy=none - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - resources2: MEMORY_MB:1024 - group_policy: none - status: 200 - response_json_paths: - $.allocation_requests.`len`: 3 - $.provider_summaries.`len`: 3 - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources: - VCPU: 1 - MEMORY_MB: 1024 - $.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources: - VCPU: 1 - MEMORY_MB: 1024 - $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources: - VCPU: 1 - MEMORY_MB: 1024 - $.provider_summaries["$ENVIRON['CN_LEFT']"].resources: - VCPU: - capacity: 8 - used: 0 - MEMORY_MB: - capacity: 4096 - used: 0 - $.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources: - VCPU: - capacity: 8 - used: 0 - MEMORY_MB: - capacity: 4096 - used: 0 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources: - VCPU: - capacity: 8 - used: 0 - MEMORY_MB: - capacity: 4096 - used: 0 - -- name: different groups miss with group_policy=isolate - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - resources2: MEMORY_MB:1024 - group_policy: isolate - status: 200 - response_json_paths: - # We asked for VCPU and MEMORY_MB to be satisfied by *different* - # providers, because they're in separate numbered request groups and - # group_policy=isolate. Since there are no sharing providers of these - # resources, we get no results. - $.allocation_requests.`len`: 0 - $.provider_summaries.`len`: 0 - -- name: resources combine - GET: /allocation_candidates - query_parameters: - resources: VCPU:3,MEMORY_MB:512 - resources1: VCPU:1,MEMORY_MB:1024 - resources2: VCPU:2 - group_policy: none - status: 200 - response_json_paths: - $.allocation_requests.`len`: 3 - $.provider_summaries.`len`: 3 - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources: - VCPU: 6 - MEMORY_MB: 1536 - $.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources: - VCPU: 6 - MEMORY_MB: 1536 - $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources: - VCPU: 6 - MEMORY_MB: 1536 - -- name: group policy not required with only one numbered group - GET: /allocation_candidates?resources=VCPU:1&resources1=MEMORY_MB:2048 - status: 200 - response_json_paths: - $.allocation_requests.`len`: 3 - $.provider_summaries.`len`: 3 - -- name: disk sharing isolated - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1,MEMORY_MB:1024 - resources2: DISK_GB:100 - group_policy: isolate - status: 200 - response_json_paths: - # Here we've asked for VCPU and MEMORY_MB to be satisfied by the same - # provider - all three of our non-sharing providers can do that - and - # the DISK_GB to be satisfied by a *different* provider than the VCPU and - # MEMORY_MB. So we'll get all permutations where cn_* provide VCPU and - # MEMORY_MB and shr_disk_* provide the DISK_GB; but *no* results where - # DISK_GB is provided by the cn_*s themselves. 
- $.allocation_requests.`len`: 5 - $.provider_summaries.`len`: 5 - -- name: disk sharing non-isolated - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1,MEMORY_MB:1024 - resources2: DISK_GB:100 - group_policy: none - status: 200 - response_json_paths: - $.allocation_requests.`len`: 7 - $.provider_summaries.`len`: 5 - -- name: isolated ssd - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1,MEMORY_MB:1024 - resources2: DISK_GB:100 - required2: CUSTOM_DISK_SSD - group_policy: isolate - status: 200 - response_json_paths: - # We get candidates [cn_left + shr_disk_1] and [cn_middle + shr_disk_1] - # We don't get [cn_right + shr_disk_1] because they're not associated via aggregate. - # We don't get [cn_left/middle + shr_disk_2] because shr_disk_2 doesn't have the SSD trait - # We don't get [cn_left] or [cn_right] even though they have SSD disk because we asked to isolate - $.allocation_requests.`len`: 2 - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources: - VCPU: 1 - MEMORY_MB: 1024 - $.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources: - VCPU: 1 - MEMORY_MB: 1024 - # shr_disk_1 satisfies the disk for both allocation requests - $.allocation_requests..allocations["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB]: [100, 100] - $.provider_summaries.`len`: 3 - $.provider_summaries["$ENVIRON['CN_LEFT']"].resources: - VCPU: - capacity: 8 - used: 0 - MEMORY_MB: - capacity: 4096 - used: 0 - DISK_GB: - capacity: 500 - used: 0 - $.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources: - VCPU: - capacity: 8 - used: 0 - MEMORY_MB: - capacity: 4096 - used: 0 - $.provider_summaries["$ENVIRON['SHR_DISK_1']"].resources: - DISK_GB: - capacity: 1000 - used: 0 - -- name: no isolation, forbid ssd - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - resources2: DISK_GB:100 - required2: "!CUSTOM_DISK_SSD" - group_policy: none - status: 200 - response_json_paths: - # The permutations we *don't* get are: - # cn_right by itself because it has SSD - # - anything involving shr_disk_1 because it has SSD - $.allocation_requests.`len`: 4 - # We get two allocation requests involving cn_left - one where it - # satisfies the disk itself and one where shr_disk_2 provides it - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: [1, 1] - # We get one for [cn_middle + shr_disk_2] - it doesn't have disk to provide for itself - $.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources[VCPU]: 1 - # We get one for [cn_right + shr_disk_2] - cn_right can't provide its own - # disk due to the forbidden SSD trait. 
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1 - # shr_disk_2 satisfies the disk for three out of the four allocation - # requests (all except the one where cn_left provides for itself) - $.allocation_requests..allocations["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB]: [100, 100, 100] - # Validate that we got the correct four providers in the summaries - $.provider_summaries.`len`: 4 - $.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB][capacity]: 1000 - -- name: member_of filters - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - resources2: DISK_GB:100 - member_of2: $ENVIRON['AGGC'] - group_policy: none - status: 200 - response_json_paths: - $.allocation_requests.`len`: 1 - $.allocation_requests[0].allocations["$ENVIRON['CN_RIGHT']"].resources: - VCPU: 1 - DISK_GB: 100 - $.provider_summaries.`len`: 1 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[DISK_GB][capacity]: 500 - -- name: required, forbidden, member_of in - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - required1: "!HW_CPU_X86_SSE" - resources2: DISK_GB:100 - required2: CUSTOM_DISK_SSD - member_of2: in:$ENVIRON['AGGA'],$ENVIRON['AGGC'] - group_policy: none - status: 200 - response_json_paths: - # cn_middle won't appear (forbidden SSE trait) - # shr_disk_2 won't appear (required SSD trait is absent) - # [cn_left] won't be in the results (required SSD trait is absent) - # So we'll get: - # [cn_left, shr_disk_1] - # [cn_right] - $.allocation_requests.`len`: 2 - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: 1 - $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1 - $.allocation_requests..allocations["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB]: 100 - $.provider_summaries.`len`: 3 - $.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[DISK_GB][capacity]: 500 - $.provider_summaries["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB][capacity]: 1000 - -- name: multiple member_of - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - resources2: DISK_GB:100 - member_of2: $ENVIRON['AGGA'] - member_of2: in:$ENVIRON['AGGB'],$ENVIRON['AGGC'] - group_policy: isolate - status: 200 - response_json_paths: - # The member_of2 specifications say that the DISK_GB resource must come - # from a provider that's in aggA and also in (aggB and/or aggC). Only - # shr_disk_2 qualifies; so we'll get results anchored at cn_middle and - # cn_right. But note that we'll also get a result anchored at cn_left: - # it doesn't meet the member_of criteria, but it doesn't need to, since - # it's not providing the DISK_GB resource. 
- $.allocation_requests.`len`: 3 - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: 1 - $.allocation_requests..allocations["$ENVIRON['CN_MIDDLE']"].resources[VCPU]: 1 - $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1 - $.allocation_requests..allocations["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB]: [100, 100, 100] - $.provider_summaries.`len`: 4 - $.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_MIDDLE']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8 - $.provider_summaries["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB][capacity]: 1000 - -- name: multiple disks, multiple networks - GET: /allocation_candidates - query_parameters: - resources1: VCPU:1 - resources2: VGPU:1 - required2: HW_GPU_API_DXVA - resources3: MEMORY_MB:1024 - resources4: DISK_GB:100 - required4: CUSTOM_DISK_SSD - resources5: DISK_GB:50 - required5: "!CUSTOM_DISK_SSD" - resources6: SRIOV_NET_VF:1,CUSTOM_NET_MBPS:1000 - resources7: SRIOV_NET_VF:2,CUSTOM_NET_MBPS:2000 - group_policy: none - # Breaking it down: - # => These could come from cn_left, cn_middle, or cn_right - # ?resources1=VCPU:1 - # &resources3=MEMORY_MB:1024 - # => But this limits us to cn_left and cn_right - # &resources2=VGPU:1&required2=HW_GPU_API_DXVA - # => Since we're not isolating, this SSD can come from cn_right or shr_disk_1 - # &resources4=DISK_GB:100&required4=CUSTOM_DISK_SSD - # => This non-SSD can come from cn_left or shr_disk_2 - # &resources5=DISK_GB:50&required5=!CUSTOM_DISK_SSD - # => These VFs and bandwidth can come from cn_left or shr_net. Since cn_left - # can't be an anchor for shr_net, these will always combine. - # &resources6=SRIOV_NET_VF:1,CUSTOM_NET_MBPS:1000 - # &resources7=SRIOV_NET_VF:2,CUSTOM_NET_MBPS:2000 - # => If we didn't do this, the separated VCPU/MEMORY_MB/VGPU resources would - # cause us to get no results - # &group_policy=none - status: 200 - response_json_paths: - # We have two permutations involving cn_left. - # - One where the non-SSD is satisfied by cn_left itself - # [cn_left(VCPU:1, MEMORY_MB:1024, VGPU:1, DISK_GB:50, SRIOV_NET_VF:3, CUSTOM_NET_MBPS:3000), - # shr_disk_1(DISK_GB:100)] - # - And one where the non-SSD is satisfied by shr_disk_2 - # [cn_left(VCPU:1, MEMORY_MB:1024, VGPU:1, SRIOV_NET_VF:3, CUSTOM_NET_MBPS:3000), - # shr_disk_1(DISK_GB:100), - # shr_disk_2(DISK_GB: 50)] - # There's only one result involving cn_right. 
- # - We must satisfy the SSD from cn_right and the non-SSD from shr_disk_2
- # - We must satisfy the network stuff from shr_net
- # [cn_right(VCPU:1, MEMORY_MB:1024, VGPU:1, DISK_GB:100),
- # shr_disk_2(DISK_GB:50),
- # shr_net(SRIOV_NET_VF:3, CUSTOM_NET_MBPS:3000)]
- $.allocation_requests.`len`: 3
- $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VCPU]: [1, 1]
- $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[MEMORY_MB]: [1024, 1024]
- $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VGPU]: [1, 1]
- $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[SRIOV_NET_VF]: [3, 3]
- $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[CUSTOM_NET_MBPS]: [3000, 3000]
- $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[DISK_GB]: 50
- # These come from the cn_left results
- $.allocation_requests..allocations["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB]: [100, 100]
- # One of these comes from the second cn_left result, the other from the cn_right result
- $.allocation_requests..allocations["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB]: [50, 50]
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 1
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[MEMORY_MB]: 1024
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VGPU]: 1
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[DISK_GB]: 100
- $.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[SRIOV_NET_VF]: 3
- $.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[CUSTOM_NET_MBPS]: 3000
- # Just make sure we got the correct five providers in the summaries
- $.provider_summaries.`len`: 5
- $.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VCPU][capacity]: 8
- $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
- $.provider_summaries["$ENVIRON['SHR_DISK_1']"].resources[DISK_GB][capacity]: 1000
- $.provider_summaries["$ENVIRON['SHR_DISK_2']"].resources[DISK_GB][capacity]: 1000
- $.provider_summaries["$ENVIRON['SHR_NET']"].resources[SRIOV_NET_VF][capacity]: 16
-
-- name: combining request groups exceeds capacity
- GET: /allocation_candidates
- query_parameters:
- resources: VCPU:2,MEMORY_MB:2048,SRIOV_NET_VF:1,CUSTOM_NET_MBPS:2000
- resources1: SRIOV_NET_VF:1,CUSTOM_NET_MBPS:3000
- status: 200
- response_json_paths:
- # CUSTOM_NET_MBPS of 2000 + 3000 = 5000 is too much for cn_left, but
- # shr_net can accommodate it.
- $.allocation_requests.`len`: 1
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[VCPU]: 2
- $.allocation_requests..allocations["$ENVIRON['CN_RIGHT']"].resources[MEMORY_MB]: 2048
- $.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[SRIOV_NET_VF]: 2
- $.allocation_requests..allocations["$ENVIRON['SHR_NET']"].resources[CUSTOM_NET_MBPS]: 5000
- $.provider_summaries.`len`: 2
- $.provider_summaries["$ENVIRON['CN_RIGHT']"].resources[VCPU][capacity]: 8
- $.provider_summaries["$ENVIRON['SHR_NET']"].resources[CUSTOM_NET_MBPS][capacity]: 40000
-
-- name: combining request groups exceeds max_unit
- GET: /allocation_candidates
- query_parameters:
- resources: VGPU:1
- resources1: VGPU:1
- resources2: VGPU:1
- group_policy: none
- status: 200
- response_json_paths:
- # VGPU of 1 + 1 + 1 = 3 exceeds max_unit on cn_right, but cn_left can handle it.
- $.allocation_requests.`len`: 1 - $.allocation_requests..allocations["$ENVIRON['CN_LEFT']"].resources[VGPU]: 3 - $.provider_summaries.`len`: 1 - $.provider_summaries["$ENVIRON['CN_LEFT']"].resources[VGPU][capacity]: 8 - -################# -# Error scenarios -################# -- name: numbered resources bad microversion - GET: /allocation_candidates?resources=MEMORY_MB:1024&resources1=VCPU:1 - request_headers: - openstack-api-version: placement 1.24 - status: 400 - response_strings: - - Invalid query string parameters - - "'resources1' was unexpected" - -- name: numbered traits bad microversion - GET: /allocation_candidates?resources=MEMORY_MB:1024&required2=HW_CPU_X86_AVX2 - request_headers: - openstack-api-version: placement 1.24 - status: 400 - response_strings: - - Invalid query string parameters - - "'required2' was unexpected" - -- name: numbered member_of bad microversion - GET: /allocation_candidates?resources=MEMORY_MB:1024&member_of3=$ENVIRON['AGGB'] - request_headers: - openstack-api-version: placement 1.24 - status: 400 - response_strings: - - Invalid query string parameters - - "'member_of3' was unexpected" - -- name: group_policy bad microversion - GET: /allocation_candidates?resources=VCPU:1&group_policy=isolate - request_headers: - openstack-api-version: placement 1.24 - status: 400 - response_strings: - - Invalid query string parameters - - "'group_policy' was unexpected" - -- name: bogus numbering - GET: /allocation_candidates?resources01=VCPU:1 - status: 400 - response_strings: - - Invalid query string parameters - - "'resources01' does not match any of the regexes" - -- name: bogus suffix - GET: /allocation_candidates?resources1a=VCPU:1 - status: 400 - response_strings: - - Invalid query string parameters - - "'resources1a' does not match any of the regexes" - -- name: invalid group_policy value - GET: /allocation_candidates?resources=VCPU:1&group_policy=bogus - status: 400 - response_strings: - - Invalid query string parameters - - "'bogus' is not one of ['none', 'isolate']" - -- name: group_policy required when more than one numbered group - GET: /allocation_candidates?resources1=VCPU:1&resources2=VCPU:1 - status: 400 - response_strings: - - The \"group_policy\" parameter is required when specifying more than one \"resources{N}\" parameter. - -- name: orphaned traits keys - GET: /allocation_candidates?required=FOO&required1=BAR - status: 400 - response_strings: - - 'Found the following orphaned traits keys: required, required1' - -- name: orphaned member_of keys - GET: /allocation_candidates?member_of=$ENVIRON['AGGA']&member_of3=$ENVIRON['AGGC'] - status: 400 - response_strings: - - 'Found the following orphaned member_of keys: member_of, member_of3' - -- name: at least one request group required - GET: /allocation_candidates?group_policy=isolate - status: 400 - response_strings: - - At least one request group (`resources` or `resources{N}`) is required. diff --git a/nova/tests/functional/api/openstack/placement/gabbits/inventory-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/inventory-policy.yaml deleted file mode 100644 index 15d438643ada..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/inventory-policy.yaml +++ /dev/null @@ -1,85 +0,0 @@ -# This tests the individual CRUD operations on -# /resource_providers/{uuid}/inventories* using a non-admin user with an -# open policy configuration. The response validation is intentionally minimal. 
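The inventory handling exercised in this file and at length in the next follows one REST shape: POST creates a single inventory record, PUT on the collection replaces everything at once, and each write carries `resource_provider_generation` for optimistic concurrency. A rough client-side sketch with the `requests` library; the endpoint, token, and provider UUID are placeholders, not values from these tests:

```python
# Rough sketch of the inventory calls below; all identifiers are placeholders.
import requests

PLACEMENT = 'http://placement.example'
RP_UUID = '28a09ea0-9b6a-4b61-ad5c-2d6f9f2c1e84'
HEADERS = {'x-auth-token': 'admin',
           'content-type': 'application/json',
           'accept': 'application/json'}

# Create one inventory record for a single resource class.
resp = requests.post(
    '%s/resource_providers/%s/inventories' % (PLACEMENT, RP_UUID),
    headers=HEADERS,
    json={'resource_class': 'DISK_GB', 'total': 2048, 'reserved': 512})
assert resp.status_code == 201
gen = resp.json()['resource_provider_generation']

# Replace the full set; a stale generation gets a 409 instead of a write.
resp = requests.put(
    '%s/resource_providers/%s/inventories' % (PLACEMENT, RP_UUID),
    headers=HEADERS,
    json={'resource_provider_generation': gen,
          'inventories': {'DISK_GB': {'total': 2048},
                          'VCPU': {'total': 8}}})
assert resp.status_code == 200
```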
-fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -tests: - -- name: post new resource provider - POST: /resource_providers - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - -- name: list inventories - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - response_json_paths: - $.resource_provider_generation: 0 - $.inventories: {} - -- name: post an inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - data: - resource_class: DISK_GB - total: 2048 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 201 - response_headers: - location: $SCHEME://$NETLOC/resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB - -- name: show inventory - GET: $LOCATION - status: 200 - -- name: update one inventory - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 1 - total: 2048 - reserved: 1024 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 200 - -- name: update all inventory - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 2 - inventories: - DISK_GB: - total: 2048 - reserved: 1024 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - VCPU: - total: 8 - status: 200 - -- name: delete specific inventory - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB - status: 204 - -- name: delete all inventory - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/inventory.yaml b/nova/tests/functional/api/openstack/placement/gabbits/inventory.yaml deleted file mode 100644 index e1a6abd6c904..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/inventory.yaml +++ /dev/null @@ -1,812 +0,0 @@ -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - -tests: -- name: inventories for missing provider - GET: /resource_providers/7260669a-e3d4-4867-aaa7-683e2ab6958c/inventories - status: 404 - response_strings: - - No resource provider with uuid 7260669a-e3d4-4867-aaa7-683e2ab6958c found - response_json_paths: - $.errors[0].title: Not Found - -- name: delete all inventory for missing resource provider - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: post new resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - -- name: get empty inventories - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - response_json_paths: - $.resource_provider_generation: 0 - $.inventories: {} - -- name: post a conflicting capacity inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 256 - reserved: 512 - status: 400 - response_strings: - - Unable to create inventory for resource provider - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with no total specified - POST: 
/resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - status: 400 - response_strings: - - JSON does not validate - - "'total' is a required property" - -- name: post a negative inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: -1 - status: 400 - response_strings: - - JSON does not validate - - -1 is less than the minimum of 1 - -- name: post an inventory with invalid total - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 0 - reserved: 512 - min_unit: 1 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "JSON does not validate: 0 is less than the minimum of 1" - - "Failed validating 'minimum' in schema['properties']['total']" - -- name: post an inventory invalid min_unit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - reserved: 512 - min_unit: 0 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "JSON does not validate: 0 is less than the minimum of 1" - - "Failed validating 'minimum' in schema['properties']['min_unit']" - -- name: post an inventory invalid max_unit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - reserved: 512 - min_unit: 10 - max_unit: 0 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "JSON does not validate: 0 is less than the minimum of 1" - - "Failed validating 'minimum' in schema['properties']['max_unit']" - -- name: post an inventory invalid step_size - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 0 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "JSON does not validate: 0 is less than the minimum of 1" - - "Failed validating 'minimum' in schema['properties']['step_size']" - -- name: post an inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 201 - response_headers: - location: $SCHEME://$NETLOC/resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB - response_json_paths: - $.resource_provider_generation: 1 - $.total: 2048 - $.reserved: 512 - -- name: get that inventory - GET: $LOCATION - status: 200 - request_headers: - # set microversion to 1.15 to get timestamp headers - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_json_paths: - $.resource_provider_generation: 1 - $.total: 2048 - $.reserved: 512 - $.min_unit: 10 - $.max_unit: 1024 - $.step_size: 10 - $.allocation_ratio: 1.0 - -- name: get inventory v1.14 no cache headers - GET: $LAST_URL - status: 200 - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - cache-control - - last-modified - -- name: modify the inventory - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 1 - total: 2048 - reserved: 1024 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 200 - response_headers: - content-type: /application/json/ - response_json_paths: - $.reserved: 1024 - -- name: confirm inventory change - GET: $LAST_URL - response_json_paths: - $.resource_provider_generation: 2 - $.total: 2048 - $.reserved: 1024 - -- name: modify inventory invalid generation - PUT: $LAST_URL - request_headers: - content-type: application/json - openstack-api-version: placement 1.23 - data: - resource_provider_generation: 5 - total: 2048 - status: 409 - response_strings: - - resource provider generation conflict - response_json_paths: - $.errors[0].title: Conflict - $.errors[0].code: placement.concurrent_update - -- name: modify inventory no such resource class in inventory - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories/MEMORY_MB - request_headers: - content-type: application/json - data: - resource_provider_generation: 2 - total: 2048 - status: 400 - response_strings: - - No inventory record with resource class - response_json_paths: - $.errors[0].title: Bad Request - -- name: modify inventory invalid data - desc: This should 400 because reserved is greater than total - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 2 - total: 2048 - reserved: 4096 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - Unable to update inventory for resource provider $ENVIRON['RP_UUID'] - response_json_paths: - $.errors[0].title: Bad Request - -- name: put inventory bad form - desc: This should 400 because reserved is greater than total - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - house: red - car: blue - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: post inventory malformed json - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: '{"foo": }' - status: 400 - response_strings: - - Malformed JSON - response_json_paths: - $.errors[0].title: Bad Request - -- name: post inventory bad syntax schema - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: bad_class - total: 2048 - status: 400 - response_json_paths: - $.errors[0].title: Bad Request - -- name: post inventory bad resource class - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: NO_CLASS_14 - total: 2048 - status: 400 - response_strings: - - No such resource class NO_CLASS_14 - response_json_paths: - $.errors[0].title: Bad Request - -- name: post inventory duplicated resource class - desc: DISK_GB was already created above - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - 
request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - status: 409 - response_strings: - - Update conflict - response_json_paths: - $.errors[0].title: Conflict - -- name: get list of inventories - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - # set microversion to 1.15 to get timestamp headers - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_json_paths: - $.resource_provider_generation: 2 - $.inventories.DISK_GB.total: 2048 - $.inventories.DISK_GB.reserved: 1024 - -- name: delete the inventory - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB - status: 204 - -- name: get now empty inventories - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - response_json_paths: - $.resource_provider_generation: 3 - $.inventories: {} - -- name: post new disk inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 1024 - status: 201 - -- name: post new ipv4 address inventory - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: IPV4_ADDRESS - total: 255 - reserved: 2 - status: 201 - -- name: list both those inventories - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - response_json_paths: - $.resource_provider_generation: 5 - $.inventories.DISK_GB.total: 1024 - $.inventories.IPV4_ADDRESS.total: 255 - -- name: post ipv4 address inventory again - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: IPV4_ADDRESS - total: 255 - reserved: 2 - status: 409 - response_json_paths: - $.errors[0].title: Conflict - -- name: delete inventory - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS - status: 204 - response_forbidden_headers: - - content-type - -- name: delete inventory again - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS - status: 404 - response_strings: - - No inventory of class IPV4_ADDRESS found for delete - response_json_paths: - $.errors[0].title: Not Found - -- name: get missing inventory class - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS - status: 404 - response_strings: - - No inventory of class IPV4_ADDRESS for $ENVIRON['RP_UUID'] - response_json_paths: - $.errors[0].title: Not Found - -- name: get invalid inventory class - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories/HOUSE - status: 404 - response_strings: - - No inventory of class HOUSE for $ENVIRON['RP_UUID'] - response_json_paths: - $.errors[0].title: Not Found - -- name: get missing resource provider inventory - GET: /resource_providers/2e1dda56-8b18-4fb9-8c5c-3125891b7143/inventories/VCPU - status: 404 - -- name: create another resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: disk-network - status: 201 - -- name: put all inventory - PUT: $LOCATION/inventories - request_headers: - content-type: application/json - # set microversion to 1.15 to get timestamp headers - openstack-api-version: placement 1.15 - data: - resource_provider_generation: 0 - inventories: - IPV4_ADDRESS: - total: 253 - DISK_GB: - total: 
1024 - status: 200 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_json_paths: - $.resource_provider_generation: 1 - $.inventories.IPV4_ADDRESS.total: 253 - $.inventories.IPV4_ADDRESS.reserved: 0 - $.inventories.DISK_GB.total: 1024 - $.inventories.DISK_GB.allocation_ratio: 1.0 - -- name: check both inventory classes - GET: $LAST_URL - response_json_paths: - $.resource_provider_generation: 1 - $.inventories.DISK_GB.total: 1024 - $.inventories.IPV4_ADDRESS.total: 253 - -- name: check one inventory class - GET: $LAST_URL/DISK_GB - response_json_paths: - $.total: 1024 - -- name: put all inventory bad generation - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - openstack-api-version: placement 1.23 - data: - resource_provider_generation: 99 - inventories: - IPV4_ADDRESS: - total: 253 - status: 409 - response_strings: - - resource provider generation conflict - response_json_paths: - $.errors[0].title: Conflict - $.errors[0].code: placement.concurrent_update - -- name: put all inventory unknown resource class - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 6 - inventories: - HOUSE: - total: 253 - status: 400 - response_strings: - - Unknown resource class in inventory - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with total exceed max limit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2147483648 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with reserved exceed max limit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 1024 - reserved: 2147483648 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with min_unit exceed max limit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 1024 - reserved: 512 - min_unit: 2147483648 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with max_unit exceed max limit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 1024 - reserved: 512 - min_unit: 10 - max_unit: 2147483648 - step_size: 10 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with step_size exceed max limit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 1024 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 
2147483648 - allocation_ratio: 1.0 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: post an inventory with allocation_ratio exceed max limit - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 1024 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 3.40282e+39 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: modify the inventory with total exceed max limit - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 1 - inventories: - DISK_GB: - total: 2147483648 - reserved: 512 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: modify the inventory with allocation_ratio exceed max limit - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 1 - inventories: - DISK_GB: - total: 1024 - reserved: 512 - allocation_ratio: 3.40282e+39 - status: 400 - response_strings: - - "Failed validating 'maximum'" - response_json_paths: - $.errors[0].title: Bad Request - -# NOTE(cdent): The generation is 6 now, based on the activity at -# the start of this file. -- name: put all inventory bad capacity - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 6 - inventories: - IPV4_ADDRESS: - total: 253 - reserved: 512 - status: 400 - response_strings: - - Unable to update inventory - - greater than or equal to total - response_json_paths: - $.errors[0].title: Bad Request - -- name: put all inventory zero capacity old microversion - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - resource_provider_generation: 6 - inventories: - IPV4_ADDRESS: - total: 253 - reserved: 253 - status: 400 - response_strings: - - Unable to update inventory - - greater than or equal to total - response_json_paths: - $.errors[0].title: Bad Request - -- name: put inventory with reserved equal to total - PUT: $LAST_URL - request_headers: - content-type: application/json - openstack-api-version: placement 1.26 - data: - resource_provider_generation: 6 - inventories: - IPV4_ADDRESS: - total: 253 - reserved: 253 - status: 200 - -- name: put all inventory bad capacity in new microversion - PUT: $LAST_URL - request_headers: - content-type: application/json - openstack-api-version: placement 1.26 - data: - resource_provider_generation: 7 - inventories: - IPV4_ADDRESS: - total: 253 - reserved: 512 - status: 400 - response_strings: - - Unable to update inventory - - greater than total - response_json_paths: - $.errors[0].title: Bad Request - -- name: put one inventory zero capacity old microversion - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories/IPV4_ADDRESS - request_headers: - content-type: application/json - data: - resource_provider_generation: 7 - total: 253 - reserved: 253 - status: 400 - response_strings: - - Unable to update inventory - - greater than or equal to total - response_json_paths: - $.errors[0].title: Bad Request - -- name: put one inventory with reserved equal to total new microversion - PUT: $LAST_URL - request_headers: - content-type: application/json - openstack-api-version: placement 1.26 - data: - resource_provider_generation: 7 - total: 512 - 
reserved: 512 - status: 200 - -- name: delete all inventory bad generation - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 99 - inventories: - IPV4_ADDRESS: - total: 253 - status: 409 - response_strings: - - resource provider generation conflict - -- name: delete all inventory - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - openstack-api-version: placement 1.5 - status: 204 - -- name: delete empty inventories - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - openstack-api-version: placement 1.5 - status: 204 - -- name: get inventories after deletions - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - response_json_paths: - $.resource_provider_generation: 10 - $.inventories: {} - -- name: post an inventory again - POST: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_class: DISK_GB - total: 2048 - reserved: 512 - min_unit: 10 - max_unit: 1024 - step_size: 10 - allocation_ratio: 1.0 - status: 201 - response_headers: - location: $SCHEME://$NETLOC/resource_providers/$ENVIRON['RP_UUID']/inventories/DISK_GB - response_json_paths: - $.resource_provider_generation: 11 - $.total: 2048 - $.reserved: 512 - -- name: delete all inventory with put - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - content-type: application/json - openstack-api-version: placement 1.4 - data: - resource_provider_generation: 11 - inventories: {} - response_json_paths: - $.resource_provider_generation: 12 - $.inventories: {} - status: 200 - -- name: get generation after deletion - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - response_json_paths: - $.resource_provider_generation: 12 - $.inventories: {} - -- name: delete inventories earlier version - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - openstack-api-version: placement 1.4 - status: 405 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/microversion-bug-1724065.yaml b/nova/tests/functional/api/openstack/placement/gabbits/microversion-bug-1724065.yaml deleted file mode 100644 index 718faaf02482..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/microversion-bug-1724065.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Test launchpad bug https://bugs.launchpad.net/nova/+bug/1724065 - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: user - -tests: - -# min version from start of placement time is 1.0 -# Without the fix, this results in a 500 with an 'HTTP_ACCEPT' -# KeyError. -- name: no accept header and out of range microversion - GET: /resource_providers - request_headers: - openstack-api-version: placement 0.9 - status: 406 - response_strings: - - Unacceptable version header - diff --git a/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml b/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml deleted file mode 100644 index fa73a3979392..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml +++ /dev/null @@ -1,90 +0,0 @@ -# Tests to build microversion functionality behavior and confirm -# it is present and behaving as expected. 
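Microversion negotiation, which the next file pins down, is driven entirely by the `openstack-api-version` header: omit it to get the minimum version, send `placement latest` for the newest, and expect a 406 for anything outside the supported range. A client-side sketch (endpoint and token are placeholders):

```python
# Sketch of microversion negotiation; endpoint and token are placeholders.
import requests

PLACEMENT = 'http://placement.example'
BASE = {'accept': 'application/json', 'x-auth-token': 'user'}

# Ask for the newest microversion; the server echoes what it actually chose.
resp = requests.get(
    PLACEMENT + '/',
    headers=dict(BASE, **{'openstack-api-version': 'placement latest'}))
print(resp.headers['openstack-api-version'])  # e.g. "placement 1.30"

# A version outside the supported range is refused.
resp = requests.get(
    PLACEMENT + '/',
    headers=dict(BASE, **{'openstack-api-version': 'placement 0.5'}))
assert resp.status_code == 406
```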
- -fixtures: - - APIFixture - -defaults: - request_headers: - accept: application/json - x-auth-token: user - -tests: -- name: root has microversion header - GET: / - response_headers: - vary: /openstack-api-version/ - openstack-api-version: /^placement \d+\.\d+$/ - -- name: root has microversion info - GET: / - response_json_paths: - $.versions[0].max_version: /^\d+\.\d+$/ - $.versions[0].min_version: /^\d+\.\d+$/ - $.versions[0].id: v1.0 - $.versions[0].status: CURRENT - $.versions[0].links[?rel = 'self'].href: '' - -- name: unavailable microversion raises 406 - GET: / - request_headers: - openstack-api-version: placement 0.5 - status: 406 - response_headers: - content-type: /application/json/ - response_json_paths: - $.errors.[0].title: Not Acceptable - $.errors.[0].max_version: /^\d+\.\d+$/ - $.errors.[0].min_version: /^\d+\.\d+$/ - response_strings: - - "Unacceptable version header: 0.5" - response_json_paths: - $.errors[0].title: Not Acceptable - -- name: latest microversion is 1.30 - GET: / - request_headers: - openstack-api-version: placement latest - response_headers: - vary: /openstack-api-version/ - openstack-api-version: placement 1.30 - -- name: other accept header bad version - GET: / - request_headers: - accept: text/html - openstack-api-version: placement 0.5 - status: 406 - response_headers: - content-type: /text/html/ - response_strings: - - "Unacceptable version header: 0.5" - -- name: bad format string raises 400 - GET: / - request_headers: - openstack-api-version: placement pony.horse - status: 400 - response_strings: - - "invalid version string: pony.horse" - response_json_paths: - $.errors[0].title: Bad Request - -- name: bad format multidot raises 400 - GET: / - request_headers: - openstack-api-version: placement 1.2.3 - status: 400 - response_strings: - - "invalid version string: 1.2.3" - response_json_paths: - $.errors[0].title: Bad Request - -- name: error in application produces microversion headers - desc: we do not want xml - POST: / - request_headers: - content-type: application/xml - status: 405 - response_headers: - openstack-api-version: placement 1.0 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/non-cors.yaml b/nova/tests/functional/api/openstack/placement/gabbits/non-cors.yaml deleted file mode 100644 index b0b974cc52f7..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/non-cors.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Confirm that things work as intended when CORS is not configured. - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: user - -tests: -- name: options request not allowed - OPTIONS: / - request_headers: - origin: http://valid.example.com - access-control-request-method: GET - status: 405 - -- name: get request no cors headers - GET: / - request_headers: - origin: http://valid.example.com - access-control-request-method: GET - status: 200 - response_forbidden_headers: - - access-control-allow-origin diff --git a/nova/tests/functional/api/openstack/placement/gabbits/reshaper-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/reshaper-policy.yaml deleted file mode 100644 index 8e1674626146..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/reshaper-policy.yaml +++ /dev/null @@ -1,20 +0,0 @@ -# This tests POSTs to /reshaper using a non-admin user with an open policy -# configuration. The response is a 400 because of bad content, meaning we got -# past policy enforcement. If policy was being enforced we'd get a 403. 
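The /reshaper body used in this file and at length in the next has two top-level keys: `inventories` maps provider UUIDs to generation-guarded replacement inventory, and `allocations` maps consumer UUIDs to their complete new sets of allocations. A skeletal payload as a Python literal; every UUID and id below is a placeholder, not a value from the fixtures:

```python
# Skeleton of a /reshaper request body; all identifiers are placeholders.
# The service applies every inventory change and allocation move atomically.
reshape = {
    'inventories': {
        '3d4a5d8f-6f0c-4a36-9b8a-37d2a8a1a001': {   # provider uuid
            'resource_provider_generation': 5,
            'inventories': {'DISK_GB': {'total': 2048}},
        },
    },
    'allocations': {
        'be966e15-57f6-4e2d-9d1a-2c9f3a9e9002': {   # consumer uuid
            'allocations': {
                '3d4a5d8f-6f0c-4a36-9b8a-37d2a8a1a001': {
                    'resources': {'DISK_GB': 1000},
                },
            },
            'project_id': 'a-project',
            'user_id': 'a-user',
            # null (None) when the consumer has no existing allocations
            'consumer_generation': 1,
        },
    },
}
```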
-fixtures:
- - OpenPolicyFixture
-
-defaults:
- request_headers:
- x-auth-token: user
- accept: application/json
- content-type: application/json
- openstack-api-version: placement latest
-
-tests:
-
-- name: attempt reshape
- POST: /reshaper
- data:
- bad: content
- status: 400
diff --git a/nova/tests/functional/api/openstack/placement/gabbits/reshaper.yaml b/nova/tests/functional/api/openstack/placement/gabbits/reshaper.yaml
deleted file mode 100644
index 786d430144f8..000000000000
--- a/nova/tests/functional/api/openstack/placement/gabbits/reshaper.yaml
+++ /dev/null
@@ -1,558 +0,0 @@
-# /reshaper provides a way to atomically move inventory and allocations from
-# one resource provider to another, often from a root provider to a new child.
-
-fixtures:
- - AllocationFixture
-
-defaults:
- request_headers:
- x-auth-token: admin
- accept: application/json
- content-type: application/json
- openstack-api-version: placement 1.30
-
-tests:
-
-- name: reshaper is POST only
- GET: /reshaper
- status: 405
- response_headers:
- allow: POST
-
-- name: reshaper requires admin not user
- POST: /reshaper
- request_headers:
- x-auth-token: user
- status: 403
-
-- name: reshaper not there old
- POST: /reshaper
- request_headers:
- openstack-api-version: placement 1.29
- status: 404
-
-- name: very invalid 400
- POST: /reshaper
- status: 400
- data:
- cows: moo
- response_strings:
- - JSON does not validate
-
-- name: missing allocations
- POST: /reshaper
- data:
- inventories:
- $ENVIRON['RP_UUID']:
- resource_provider_generation: 0
- inventories:
- VCPU:
- total: 1
- status: 400
-
-# There are existing allocations on RP_UUID (created by the AllocationFixture).
-# As the code currently stands, we cannot null out those allocations from
-# reshaper because the allocations identify nothing (replace_all() is a no op).
-- name: empty allocations inv in use
- POST: /reshaper
- data:
- inventories:
- $ENVIRON['RP_UUID']:
- resource_provider_generation: 5
- inventories:
- VCPU:
- total: 1
- allocations: {}
- status: 409
- response_json_paths:
- $.errors[0].code: placement.inventory.inuse
-
-# Again, with the existing allocations on RP_UUID being held by CONSUMER_ID,
-# not INSTANCE_ID, when we try to allocate here, we don't have room. This
-# operation is correctly rejected: to actually reshape here, we would need to
-# move the CONSUMER_ID allocations in this call (and set the inventory to
-# something that could accommodate them).
-- name: with allocations
- POST: /reshaper
- data:
- inventories:
- $ENVIRON['RP_UUID']:
- resource_provider_generation: 5
- inventories:
- VCPU:
- total: 1
- allocations:
- $ENVIRON['INSTANCE_UUID']:
- allocations:
- $ENVIRON['RP_UUID']:
- resources:
- VCPU: 1
- consumer_generation: null
- project_id: $ENVIRON['PROJECT_ID']
- user_id: $ENVIRON['USER_ID']
- status: 409
- response_strings:
- - Unable to allocate inventory
-
-- name: bad rp gen
- POST: /reshaper
- data:
- inventories:
- $ENVIRON['RP_UUID']:
- resource_provider_generation: 4
- inventories:
- VCPU:
- total: 1
- allocations: {}
- status: 409
- response_strings:
- - resource provider generation conflict
- - 'actual: 5, given: 4'
-
-- name: bad consumer gen
- POST: /reshaper
- data:
- inventories:
- $ENVIRON['RP_UUID']:
- resource_provider_generation: 5
- inventories:
- VCPU:
- total: 1
- allocations:
- $ENVIRON['INSTANCE_UUID']:
- allocations:
- $ENVIRON['RP_UUID']:
- resources:
- VCPU: 1
- # The correct generation here is null, because INSTANCE_UUID
- # represents a new consumer at this point.
- consumer_generation: 99 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - status: 409 - response_strings: - - consumer generation conflict - -- name: create a child provider - POST: /resource_providers - data: - uuid: $ENVIRON['ALT_RP_UUID'] - name: $ENVIRON['ALT_RP_NAME'] - parent_provider_uuid: $ENVIRON['RP_UUID'] - -# This and subsequent error checking tests are modelled on the successful -# test which is at the end of this file. Using the same data, with minor -# adjustments, so that the cause of failure is clear. - -- name: move to bad child 400 - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 5 - inventories: - DISK_GB: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 1200 - # This resource provider does not exist. - '39bafc00-3fff-444d-b87a-2ead3f866e05': - resource_provider_generation: 0 - inventories: - VCPU: - total: 10 - max_unit: 8 - # these consumer generations are all 1 because they have - # previously allocated - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['CONSUMER_ID']: - allocations: - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 1 - status: 400 - response_json_paths: - $.errors[0].code: placement.resource_provider.not_found - -- name: poorly formed inventory 400 - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 5 - inventories: - DISK_GB: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 1200 - bad_field: moo - $ENVIRON['ALT_RP_UUID']: - resource_provider_generation: 0 - inventories: - VCPU: - total: 10 - max_unit: 8 - # these consumer generations are all 1 because they have - # previously allocated - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['CONSUMER_ID']: - allocations: - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 1 - status: 400 - response_strings: - - JSON does not validate - - "'bad_field' was unexpected" - -- name: poorly formed allocation 400 - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 5 - inventories: - DISK_GB: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 1200 - $ENVIRON['ALT_RP_UUID']: - resource_provider_generation: 0 - inventories: - VCPU: - total: 10 - max_unit: 8 - # these consumer generations are all 1 because they have - # previously allocated - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - # This bad field will cause a 
failure in the schema. - bad_field: moo - $ENVIRON['CONSUMER_ID']: - allocations: - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 1 - status: 400 - response_strings: - - JSON does not validate - - "'bad_field' was unexpected" - -- name: target resource class not found - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 5 - inventories: - # not a real inventory, but valid form - DISK_OF_STEEL: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 1200 - $ENVIRON['ALT_RP_UUID']: - resource_provider_generation: 0 - inventories: - VCPU: - total: 10 - max_unit: 8 - # these consumer generations are all 1 because they have - # previously allocated - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['CONSUMER_ID']: - allocations: - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 1 - status: 400 - response_strings: - - No such resource class DISK_OF_STEEL - -- name: move bad allocation 409 - desc: max unit on disk gb inventory violated - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 5 - inventories: - DISK_GB: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 600 - $ENVIRON['ALT_RP_UUID']: - resource_provider_generation: 0 - inventories: - VCPU: - total: 10 - max_unit: 8 - # these consumer generations are all 1 because they have - # previously allocated - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - # Violates max unit - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['CONSUMER_ID']: - allocations: - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 1 - status: 409 - response_strings: - - Unable to allocate inventory - -# This is a successful reshape using information as it was established above -# or in the AllocationFixture. A non-obvious fact of this test is that it -# confirms that resource provider and consumer generations are rolled back -# when failures occur, as in the tests above. 
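Since failed reshapes roll back both provider and consumer generations, a client that loses a race sees a 409 carrying the `placement.concurrent_update` error code and can simply refresh its generations and retry; the successful reshape follows after this sketch. A hedged outline of such a retry loop, where `session` is any requests-compatible client and `build_payload` is a placeholder callable:

```python
# Sketch of retrying a reshape on generation conflict. `session` is a
# requests-compatible client; `build_payload` is a placeholder callable
# expected to re-read current generations each time it is invoked.
def reshape_with_retry(session, base_url, build_payload, attempts=3):
    resp = None
    for _ in range(attempts):
        resp = session.post(base_url + '/reshaper', json=build_payload())
        if resp.status_code != 409:
            return resp
        errors = resp.json().get('errors', [])
        if not any(e.get('code') == 'placement.concurrent_update'
                   for e in errors):
            return resp  # some other conflict; do not blindly retry
    return resp
```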
-- name: move vcpu inventory and allocations to child - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 5 - inventories: - DISK_GB: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 1200 - $ENVIRON['ALT_RP_UUID']: - resource_provider_generation: 0 - inventories: - VCPU: - total: 10 - max_unit: 8 - # these consumer generations are all 1 because they have - # previously allocated - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['CONSUMER_ID']: - allocations: - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 1 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - $ENVIRON['ALT_RP_UUID']: - resources: - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 1 - status: 204 - -- name: get usages on parent after move - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_json_paths: - $.usages: - DISK_GB: 1020 - $.resource_provider_generation: 8 - -- name: get usages on child after move - GET: /resource_providers/$ENVIRON['ALT_RP_UUID']/usages - response_json_paths: - $.usages: - VCPU: 9 - $.resource_provider_generation: 3 - -# Now move some of the inventory back to the original provider, and put all -# the allocations under two new consumers. This is an artificial test to -# exercise new consumer creation. -- name: consolidate inventory and allocations - POST: /reshaper - data: - inventories: - $ENVIRON['RP_UUID']: - resource_provider_generation: 8 - inventories: - DISK_GB: - total: 2048 - step_size: 10 - min_unit: 10 - max_unit: 1200 - VCPU: - total: 10 - max_unit: 8 - $ENVIRON['ALT_RP_UUID']: - resource_provider_generation: 3 - inventories: {} - allocations: - $ENVIRON['CONSUMER_0']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 1000 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 2 - '7bd2e864-0415-445c-8fc2-328520ef7642': - allocations: - $ENVIRON['RP_UUID']: - resources: - VCPU: 8 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: null - '2dfa608c-cecb-4fe0-a1bb-950015fa731f': - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - VCPU: 1 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: null - $ENVIRON['CONSUMER_ID']: - allocations: {} - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['USER_ID'] - consumer_generation: 2 - $ENVIRON['ALT_CONSUMER_ID']: - allocations: - $ENVIRON['RP_UUID']: - resources: - DISK_GB: 20 - project_id: $ENVIRON['PROJECT_ID'] - user_id: $ENVIRON['ALT_USER_ID'] - consumer_generation: 2 - status: 204 - -- name: get usages on parent after move back - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_json_paths: - $.usages: - VCPU: 9 - DISK_GB: 1040 - $.resource_provider_generation: 11 - -- name: get usages on child after move back - GET: /resource_providers/$ENVIRON['ALT_RP_UUID']/usages - response_json_paths: - $.usages: {} - $.resource_provider_generation: 5 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-class-in-use.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-class-in-use.yaml deleted file mode 100644 index 00125389262a..000000000000 
--- a/nova/tests/functional/api/openstack/placement/gabbits/resource-class-in-use.yaml +++ /dev/null @@ -1,80 +0,0 @@ -# A sequence of tests that confirms that a resource class in use -# cannot be deleted. - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - # We need version 1.11 as the PUT /allocations below is - # using the < 1.12 data format. - openstack-api-version: placement 1.11 - -tests: - -- name: create a resource provider - POST: /resource_providers - data: - name: an rp - status: 201 - -- name: get resource provider - GET: $LOCATION - status: 200 - -- name: create a resource class - PUT: /resource_classes/CUSTOM_GOLD - status: 201 - -- name: add inventory to an rp - PUT: /resource_providers/$HISTORY['get resource provider'].$RESPONSE['$.uuid']/inventories - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 24 - CUSTOM_GOLD: - total: 5 - status: 200 - -- name: allocate some of it - PUT: /allocations/6d9f83db-6eb5-49f6-84b0-5d03c6aa9fc8 - data: - allocations: - - resource_provider: - uuid: $HISTORY['get resource provider'].$RESPONSE['$.uuid'] - resources: - VCPU: 5 - CUSTOM_GOLD: 1 - project_id: 42a32c07-3eeb-4401-9373-68a8cdca6784 - user_id: 66cb2f29-c86d-47c3-8af5-69ae7b778c70 - status: 204 - -- name: fail delete resource class allocations - DELETE: /resource_classes/CUSTOM_GOLD - status: 409 - response_strings: - - Error in delete resource class - - Class is in use in inventory - -- name: delete the allocation - DELETE: $HISTORY['allocate some of it'].$URL - status: 204 - -- name: fail delete resource class inventory - DELETE: /resource_classes/CUSTOM_GOLD - status: 409 - response_strings: - - Error in delete resource class - - Class is in use in inventory - -- name: delete the inventory - DELETE: $HISTORY['add inventory to an rp'].$URL - status: 204 - -- name: delete resource class - DELETE: /resource_classes/CUSTOM_GOLD - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-6.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-6.yaml deleted file mode 100644 index 9975aa654f7b..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-6.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# Confirm that 1.7 behavior of PUT resource classes is not in -# microversion 1.6. -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement 1.6 - -tests: - -- name: bodiless put - PUT: /resource_classes/CUSTOM_COW - status: 400 - response_strings: - # We don't check much of this string because it is different - # between python 2 and 3. 
- - "Malformed JSON:" diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-7.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-7.yaml deleted file mode 100644 index 1c4a5eb68bea..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-1-7.yaml +++ /dev/null @@ -1,49 +0,0 @@ -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement 1.7 - -tests: - -- name: create new custom class with put - PUT: /resource_classes/CUSTOM_COW - status: 201 - response_headers: - location: //resource_classes/CUSTOM_COW/ - -- name: verify that class with put - PUT: /resource_classes/CUSTOM_COW - status: 204 - response_headers: - location: //resource_classes/CUSTOM_COW/ - -- name: fail to put non custom class - PUT: /resource_classes/COW - status: 400 - response_strings: - - "Failed validating 'pattern'" - -- name: try to put standard class - PUT: /resource_classes/VCPU - status: 400 - response_strings: - - "Failed validating 'pattern'" - -- name: try to put too long class - PUT: /resource_classes/CUSTOM_SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS - status: 400 - response_strings: - - "Failed validating 'maxLength'" - -- name: post to create still works - POST: /resource_classes - data: - name: CUSTOM_SHEEP - status: 201 - response_headers: - location: //resource_classes/CUSTOM_SHEEP/ diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-last-modified.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-last-modified.yaml deleted file mode 100644 index e60244777770..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-last-modified.yaml +++ /dev/null @@ -1,117 +0,0 @@ -# Confirm the behavior and presence of last-modified headers for resource -# classes across multiple microversions. -# -# We have the following routes, with associated microversion, and bodies. -# -# '/resource_classes': { -# 'GET': resource_class.list_resource_classes, -# v1.2, body -# 'POST': resource_class.create_resource_class -# v1.2, no body -# }, -# '/resource_classes/{name}': { -# 'GET': resource_class.get_resource_class, -# v1.2, body -# 'PUT': resource_class.update_resource_class, -# v1.2, body, but time's arrow -# v1.7, no body -# 'DELETE': resource_class.delete_resource_class, -# v1.2, no body -# }, -# -# This means that in 1.15 we only expect last-modified headers for -# the two GET requests, for the other requests we should confirm it -# is not there. - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - openstack-api-version: placement 1.15 - -tests: - -- name: get resource classes - desc: last modified is now with standards only - GET: /resource_classes - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: create a custom class - PUT: /resource_classes/CUSTOM_MOO_MACHINE - status: 201 - response_forbidden_headers: - - last-modified - - cache-control - -- name: get custom class - GET: $LAST_URL - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: get standard class - GET: /resource_classes/VCPU - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: post a resource class - POST: /resource_classes - data: - name: CUSTOM_ALPHA - status: 201 - response_forbidden_headers: - - last-modified - - cache-control - -- name: get resource classes including custom - desc: last modified will still be now with customs because of standards - GET: /resource_classes - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: put a resource class 1.6 microversion - PUT: /resource_classes/CUSTOM_MOO_MACHINE - request_headers: - openstack-api-version: placement 1.6 - data: - name: CUSTOM_BETA - status: 200 - response_forbidden_headers: - - last-modified - - cache-control - -- name: get resource classes 1.14 microversion - GET: /resource_classes - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - last-modified - - cache-control - -- name: get standard class 1.14 microversion - GET: /resource_classes/VCPU - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - last-modified - - cache-control - -- name: get custom class 1.14 microversion - GET: $LAST_URL - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - last-modified - - cache-control diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-policy.yaml deleted file mode 100644 index 1d90db0d6f3a..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes-policy.yaml +++ /dev/null @@ -1,40 +0,0 @@ -# This tests the individual CRUD operations on /resource_classes -# using a non-admin user with an open policy configuration. The -# response validation is intentionally minimal. 
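In this and the other *-policy.yaml files, an "open" policy is one in which every placement rule has been replaced by a check that always passes, which is why the non-admin x-auth-token: user requests succeed. A sketch of the idea (illustrative Python; the rule name is an assumption, not necessarily what OpenPolicyFixture actually registers):

    from oslo_config import cfg
    from oslo_policy import policy

    # '@' is oslo.policy's always-true check string, so any credentials,
    # admin or not, pass enforcement for this rule.
    enforcer = policy.Enforcer(cfg.ConfigOpts())
    enforcer.register_defaults(
        [policy.RuleDefault('placement:resource_classes:list', '@')])
    assert enforcer.enforce('placement:resource_classes:list',
                            target={}, creds={'roles': ['not-admin']})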
-fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -tests: - -- name: list resource classes - GET: /resource_classes - response_json_paths: - $.resource_classes.`len`: 14 # Number of standard resource classes - -- name: create resource class - POST: /resource_classes - data: - name: CUSTOM_RES_CLASS_POLICY - status: 201 - response_headers: - location: //resource_classes/CUSTOM_RES_CLASS_POLICY/ - -- name: show resource class - GET: /resource_classes/CUSTOM_RES_CLASS_POLICY - response_json_paths: - $.name: CUSTOM_RES_CLASS_POLICY - -- name: update resource class - PUT: /resource_classes/CUSTOM_NEW_CLASS_POLICY - status: 201 - -- name: delete resource class - DELETE: /resource_classes/CUSTOM_NEW_CLASS_POLICY - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-classes.yaml deleted file mode 100644 index 638b03a33c7c..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-classes.yaml +++ /dev/null @@ -1,325 +0,0 @@ -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: test microversion masks resource-classes endpoint for list with 404 - GET: /resource_classes - request_headers: - openstack-api-version: placement 1.1 - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: test microversion masks resource-classes endpoint for create with 404 - desc: we want to get a 404 even if content-type is correct - POST: /resource_classes - request_headers: - openstack-api-version: placement 1.1 - content-type: application/json - data: - name: CUSTOM_NFV_BAR - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: test microversion mask when wrong content type - desc: we want to get a 404 before a 415 - POST: /resource_classes - request_headers: - openstack-api-version: placement 1.1 - content-type: text/plain - data: data - status: 404 - -- name: test wrong content type - desc: we want to get a 415 when bad content type - POST: /resource_classes - request_headers: - openstack-api-version: placement 1.2 - content-type: text/plain - data: data - status: 415 - -- name: what is at resource classes - GET: /resource_classes - response_json_paths: - response_json_paths: - $.resource_classes.`len`: 14 # Number of standard resource classes - $.resource_classes[0].name: VCPU - -- name: non admin forbidden - GET: /resource_classes - request_headers: - x-auth-token: user - accept: application/json - status: 403 - response_json_paths: - $.errors[0].title: Forbidden - -- name: post invalid non json - POST: /resource_classes - request_headers: - accept: text/plain - content-type: application/json - data: - name: FOO - status: 400 - response_strings: - - JSON does not validate - -- name: post illegal characters in name - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: CUSTOM_Illegal&@!Name? 
- status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: post new resource class - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: $ENVIRON['CUSTOM_RES_CLASS'] - status: 201 - response_headers: - location: //resource_classes/$ENVIRON['CUSTOM_RES_CLASS']/ - response_forbidden_headers: - - content-type - -- name: try to create same again - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: $ENVIRON['CUSTOM_RES_CLASS'] - status: 409 - response_strings: - - Conflicting resource class already exists - response_json_paths: - $.errors[0].title: Conflict - -- name: confirm the correct post - GET: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - content-type: application/json - response_json_paths: - $.name: $ENVIRON['CUSTOM_RES_CLASS'] - $.links[?rel = "self"].href: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - -- name: test microversion masks resource-classes endpoint for show with 404 - GET: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - openstack-api-version: placement 1.1 - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: get resource class works with no accept - GET: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - content-type: application/json - response_headers: - content-type: /application/json/ - response_json_paths: - $.name: $ENVIRON['CUSTOM_RES_CLASS'] - -- name: list resource classes after addition of custom res class - GET: /resource_classes - response_json_paths: - $.resource_classes.`len`: 15 # 14 standard plus 1 custom - $.resource_classes[14].name: $ENVIRON['CUSTOM_RES_CLASS'] - $.resource_classes[14].links[?rel = "self"].href: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - -- name: update standard resource class bad json - PUT: /resource_classes/VCPU - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: VCPU_ALTERNATE - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: update standard resource class to custom - desc: standard classes cannot be updated - PUT: /resource_classes/VCPU - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: $ENVIRON['CUSTOM_RES_CLASS'] - status: 400 - response_strings: - - Cannot update standard resource class VCPU - response_json_paths: - $.errors[0].title: Bad Request - -- name: update custom resource class to standard resource class name - PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: VCPU - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: post another custom resource class - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: CUSTOM_NFV_FOO - status: 201 - -- name: update custom resource class to already existing custom resource class name - PUT: /resource_classes/CUSTOM_NFV_FOO - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: $ENVIRON['CUSTOM_RES_CLASS'] - status: 409 - response_strings: - - Resource class already exists - - $ENVIRON['CUSTOM_RES_CLASS'] - response_json_paths: - $.errors[0].title: Conflict - -- name: test microversion masks 
resource-classes endpoint for update with 404 - PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - openstack-api-version: placement 1.1 - content-type: application/json - data: - name: CUSTOM_NFV_BAR - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: update custom resource class with additional properties - PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: CUSTOM_NFV_BAR - additional: additional - status: 400 - response_strings: - - Additional properties are not allowed - -- name: update custom resource class - PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: CUSTOM_NFV_BAR - status: 200 - response_json_paths: - $.name: CUSTOM_NFV_BAR - $.links[?rel = "self"].href: /resource_classes/CUSTOM_NFV_BAR - -- name: delete standard resource class - DELETE: /resource_classes/VCPU - status: 400 - response_strings: - - Cannot delete standard resource class - response_json_paths: - $.errors[0].title: Bad Request - -- name: test microversion masks resource-classes endpoint for delete with 404 - DELETE: /resource_classes/CUSTOM_NFV_BAR - request_headers: - openstack-api-version: placement 1.1 - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: delete custom resource class - DELETE: /resource_classes/CUSTOM_NFV_BAR - status: 204 - -- name: 404 on deleted resource class - DELETE: $LAST_URL - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: post malformed json as json - POST: /resource_classes - request_headers: - content-type: application/json - data: '{"foo": }' - status: 400 - response_strings: - - 'Malformed JSON:' - response_json_paths: - $.errors[0].title: Bad Request - -- name: post bad resource class name IRON_NFV - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: IRON_NFV # Doesn't start with CUSTOM_ - status: 400 - response_strings: - - JSON does not validate - response_json_paths: - $.errors[0].title: Bad Request - -- name: try to create resource class with name exceed max characters - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: &name_exceeds_max_length_check CUSTOM_THIS_IS_A_LONG_TEXT_OF_LENGTH_256_CHARACTERSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS - status: 400 - response_strings: - - "Failed validating 'maxLength'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: try to update resource class with name exceed max characters - PUT: /resource_classes/$ENVIRON['CUSTOM_RES_CLASS'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.6 - data: - name: *name_exceeds_max_length_check - status: 400 - response_strings: - - "Failed validating 'maxLength'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: try to create resource class with additional properties - POST: /resource_classes - request_headers: - content-type: application/json - data: - name: CUSTOM_NFV_BAR - additional: additional - status: 400 - response_strings: - - Additional properties are not allowed diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-aggregates.yaml 
b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-aggregates.yaml deleted file mode 100644 index 5041be68b2ee..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-aggregates.yaml +++ /dev/null @@ -1,181 +0,0 @@ -# Tests filtering resource providers by aggregates - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - content-type: application/json - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: post new provider 1 - POST: /resource_providers - data: - name: rp_1 - uuid: 893337e9-1e55-49f0-bcfe-6a2f16fbf2f7 - status: 200 - -- name: post new provider 2 - POST: /resource_providers - data: - name: rp_2 - uuid: 5202c48f-c960-4eec-bde3-89c4f22a17b9 - status: 200 - -- name: get by aggregates no result - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91' - response_json_paths: - $.resource_providers: [] - -- name: associate an aggregate with rp1 - PUT: /resource_providers/893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/aggregates - data: - aggregates: - - 83a3d69d-8920-48e2-8914-cadfd8fa2f91 - resource_provider_generation: 0 - status: 200 - -- name: get by aggregates one result - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91' - response_json_paths: - $.resource_providers[0].uuid: 893337e9-1e55-49f0-bcfe-6a2f16fbf2f7 - -- name: get by aggregates one result no in - GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91' - response_json_paths: - $.resource_providers[0].uuid: 893337e9-1e55-49f0-bcfe-6a2f16fbf2f7 - -- name: get by aggregates no result not a uuid - GET: '/resource_providers?member_of=not+a+uuid' - status: 400 - response_strings: - - "Expected 'member_of' parameter to contain valid UUID(s)." 
- response_json_paths: - $.errors[0].title: Bad Request - -- name: associate an aggregate with rp2 - PUT: /resource_providers/5202c48f-c960-4eec-bde3-89c4f22a17b9/aggregates - data: - aggregates: - - 83a3d69d-8920-48e2-8914-cadfd8fa2f91 - resource_provider_generation: 0 - status: 200 - -- name: get by aggregates two result - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91' - response_json_paths: - $.resource_providers.`len`: 2 - $.resource_providers[0].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ - $.resource_providers[1].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ - -- name: associate another aggregate with rp2 - PUT: /resource_providers/5202c48f-c960-4eec-bde3-89c4f22a17b9/aggregates - data: - aggregates: - - 99652f11-9f77-46b9-80b7-4b1989be9f8c - resource_provider_generation: 1 - status: 200 - -- name: get by both aggregates two - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c' - response_json_paths: - $.resource_providers.`len`: 2 - $.resource_providers[0].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ - $.resource_providers[1].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ - -- name: clear aggregates on rp1 - PUT: /resource_providers/893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/aggregates - data: - aggregates: [] - resource_provider_generation: 1 - status: 200 - -- name: get by both aggregates one - desc: only one result because we disassociated aggregates in the PUT above - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c' - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: 5202c48f-c960-4eec-bde3-89c4f22a17b9 - -- name: error on old microversion - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c' - request_headers: - openstack-api-version: placement 1.1 - status: 400 - response_strings: - - 'Invalid query string parameters' - response_json_paths: - $.errors[0].title: Bad Request - -- name: error on bogus query parameter - GET: '/resource_providers?assoc_with_aggregate=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91,99652f11-9f77-46b9-80b7-4b1989be9f8c' - status: 400 - response_strings: - - 'Invalid query string parameters' - response_json_paths: - $.errors[0].title: Bad Request - -- name: error trying multiple member_of params prior correct microversion - GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=99652f11-9f77-46b9-80b7-4b1989be9f8c' - request_headers: - openstack-api-version: placement 1.23 - status: 400 - response_strings: - - 'Multiple member_of parameters are not supported' - response_json_paths: - $.errors[0].title: Bad Request - -- name: multiple member_of params with no results - GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=99652f11-9f77-46b9-80b7-4b1989be9f8c' - status: 200 - response_json_paths: - # No provider is associated with both aggregates - resource_providers: [] - -- name: associate two aggregates with rp2 - PUT: /resource_providers/5202c48f-c960-4eec-bde3-89c4f22a17b9/aggregates - data: - aggregates: - - 99652f11-9f77-46b9-80b7-4b1989be9f8c - - 83a3d69d-8920-48e2-8914-cadfd8fa2f91 - resource_provider_generation: 2 - status: 200 - -- name: multiple member_of params AND together to result in one provider - GET: 
'/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=99652f11-9f77-46b9-80b7-4b1989be9f8c' - status: 200 - response_json_paths: - # One provider is now associated with both aggregates - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: 5202c48f-c960-4eec-bde3-89c4f22a17b9 - -- name: associate two aggregates to rp1, one of which overlaps with rp2 - PUT: /resource_providers/893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/aggregates - data: - aggregates: - - 282d469e-29e2-4a8a-8f2e-31b3202b696a - - 83a3d69d-8920-48e2-8914-cadfd8fa2f91 - resource_provider_generation: 2 - status: 200 - -- name: two AND'd member_ofs with one OR'd member_of - GET: '/resource_providers?member_of=83a3d69d-8920-48e2-8914-cadfd8fa2f91&member_of=in:99652f11-9f77-46b9-80b7-4b1989be9f8c,282d469e-29e2-4a8a-8f2e-31b3202b696a' - status: 200 - response_json_paths: - # Both rp1 and rp2 returned because both are associated with agg 83a3d69d - # and each is associated with either agg 99652f11 or agg 282d469e - $.resource_providers.`len`: 2 - $.resource_providers[0].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ - $.resource_providers[1].uuid: /5202c48f-c960-4eec-bde3-89c4f22a17b9|893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ - -- name: two AND'd member_ofs using same agg UUID - GET: '/resource_providers?member_of=282d469e-29e2-4a8a-8f2e-31b3202b696a&member_of=282d469e-29e2-4a8a-8f2e-31b3202b696a' - status: 200 - response_json_paths: - # Only rp1 returned since it's the only one associated with the duplicated agg - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: /893337e9-1e55-49f0-bcfe-6a2f16fbf2f7/ diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-bug-1779818.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-bug-1779818.yaml deleted file mode 100644 index 3f4593b8b69f..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-bug-1779818.yaml +++ /dev/null @@ -1,123 +0,0 @@ -# Test launchpad bug https://bugs.launchpad.net/nova/+bug/1779818 - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: post a resource provider as alt_parent - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: alt_parent - uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - status: 200 - response_json_paths: - $.uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.name: alt_parent - $.parent_provider_uuid: null - $.generation: 0 - -- name: post another resource provider as parent - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: parent - uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 200 - response_json_paths: - $.uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - $.name: parent - $.parent_provider_uuid: null - $.generation: 0 - -- name: post a child resource provider of the parent - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: child - uuid: $ENVIRON['RP_UUID'] - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 200 - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - $.name: child - $.parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - $.generation: 0 - -# Let's validate that now we have two tree structures -# * alt_parent -# * parent -# | -# +-- child -- name: list all resource providers - GET: /resource_providers - response_json_paths: - 
$.resource_providers.`len`: 3 - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].parent_provider_uuid: null - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].parent_provider_uuid: null - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - -# Let's re-parent the parent to the alternative parent -# so that we have only one tree. -# * alt_parent -# | -# +-- parent -# | -# +-- child -- name: update a parent of the parent - PUT: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID'] - request_headers: - content-type: application/json - data: - name: parent - parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - status: 200 - -# Let's validate that we have only one root provider now -- name: list all resource providers updated - GET: /resource_providers - response_json_paths: - $.resource_providers.`len`: 3 - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].parent_provider_uuid: null - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - -- name: list all resource providers in a tree with the child - GET: /resource_providers?in_tree=$ENVIRON['RP_UUID'] - response_json_paths: - $.resource_providers.`len`: 3 - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - -- name: list all resource providers in a tree with the parent - GET: /resource_providers?in_tree=$ENVIRON['PARENT_PROVIDER_UUID'] - response_json_paths: - $.resource_providers.`len`: 3 - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - -- name: list all resource providers in a tree with the alternative parent - GET: /resource_providers?in_tree=$ENVIRON['ALT_PARENT_PROVIDER_UUID'] - response_json_paths: - $.resource_providers.`len`: 3 - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: 
$ENVIRON['ALT_PARENT_PROVIDER_UUID'] diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-duplication.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-duplication.yaml deleted file mode 100644 index 9eb60fd9f4c5..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-duplication.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# Verify different error messages when attempting to create a -# resource provider with a duplicated name or UUID. - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - content-type: application/json - -tests: -- name: post new resource provider - POST: /resource_providers - data: - name: shared disk - uuid: $ENVIRON['RP_UUID'] - status: 201 - -- name: same uuid different name - POST: /resource_providers - data: - name: shared disk X - uuid: $ENVIRON['RP_UUID'] - status: 409 - response_strings: - - "Conflicting resource provider uuid: $ENVIRON['RP_UUID']" - -- name: same name different uuid - POST: /resource_providers - data: - name: shared disk - uuid: 2c2059d8-005c-4f5c-82b1-b1701b1a29b7 - status: 409 - response_strings: - - 'Conflicting resource provider name: shared disk' - -# On this one, don't test for which field was a duplicate because -# that depends on how the database reports columns. -- name: same name same uuid - POST: /resource_providers - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 409 - response_strings: - - Conflicting resource provider diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-links.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-links.yaml deleted file mode 100644 index 304d9fca9bce..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-links.yaml +++ /dev/null @@ -1,106 +0,0 @@ -# Confirm that the links provided when getting one or more resource -# providers are correct. They vary across different microversions.
- -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - -tests: -- name: post new resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - -- name: get rp latest - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement latest - response_json_paths: - $.links.`len`: 6 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.links[?rel = "allocations"].href: /resource_providers/$ENVIRON['RP_UUID']/allocations - $.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits - -- name: get rp 1.0 - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.0 - response_json_paths: - $.links.`len`: 3 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - -- name: get rp 1.1 - desc: aggregates added in 1.1 - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.1 - response_json_paths: - $.links.`len`: 4 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates - -- name: get rp 1.5 - desc: traits added after 1.5 - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.5 - response_json_paths: - $.links.`len`: 4 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates - -- name: get rp 1.6 - desc: traits added in 1.6 - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.6 - response_json_paths: - $.links.`len`: 5 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates - $.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits - -- name: get rp 1.7 - desc: nothing new in 1.7 - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.7 - response_json_paths: - $.links.`len`: 5 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.links[?rel = "aggregates"].href: 
/resource_providers/$ENVIRON['RP_UUID']/aggregates - $.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits - -- name: get rp allocations link added in 1.11 - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.11 - response_json_paths: - $.links.`len`: 6 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.links[?rel = "allocations"].href: /resource_providers/$ENVIRON['RP_UUID']/allocations - $.links[?rel = "traits"].href: /resource_providers/$ENVIRON['RP_UUID']/traits diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-policy.yaml deleted file mode 100644 index ef663f1b481c..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-policy.yaml +++ /dev/null @@ -1,48 +0,0 @@ -# This tests the individual CRUD operations on /resource_providers -# using a non-admin user with an open policy configuration. The -# response validation is intentionally minimal. -fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -tests: - -- name: list resource providers - GET: /resource_providers - response_json_paths: - $.resource_providers: [] - -- name: create resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - -- name: show resource provider - GET: /resource_providers/$ENVIRON['RP_UUID'] - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - -- name: update resource provider - PUT: /resource_providers/$ENVIRON['RP_UUID'] - data: - name: new name - status: 200 - response_json_paths: - $.name: new name - $.uuid: $ENVIRON['RP_UUID'] - -- name: delete resource provider - DELETE: /resource_providers/$ENVIRON['RP_UUID'] - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-resources-query.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-resources-query.yaml deleted file mode 100644 index f23e595b0f18..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider-resources-query.yaml +++ /dev/null @@ -1,156 +0,0 @@ - -fixtures: - - AllocationFixture - -defaults: - request_headers: - x-auth-token: admin - content-type: application/json - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: what is at resource providers - GET: /resource_providers - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - $.resource_providers[0].name: $ENVIRON['RP_NAME'] - $.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.resource_providers[0].links[?rel = "aggregates"].href: /resource_providers/$ENVIRON['RP_UUID']/aggregates - $.resource_providers[0].links[?rel = "usages"].href: 
/resource_providers/$ENVIRON['RP_UUID']/usages - -- name: post new resource provider - POST: /resource_providers - data: - name: $ENVIRON['ALT_RP_NAME'] - uuid: $ENVIRON['ALT_RP_UUID'] - status: 200 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - -- name: now 2 providers listed - GET: /resource_providers - response_json_paths: - $.resource_providers.`len`: 2 - -- name: list resource providers providing resources filter before API 1.4 - GET: /resource_providers?resources=VCPU:1 - request_headers: - openstack-api-version: placement 1.3 - status: 400 - response_strings: - - 'Invalid query string parameters' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing a badly-formatted resources filter - GET: /resource_providers?resources=VCPU - status: 400 - response_strings: - - 'Badly formed resources parameter. Expected resources query string parameter in form:' - - 'Got: VCPU.' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing a resources filter with non-integer amount - GET: /resource_providers?resources=VCPU:fred - status: 400 - response_strings: - - 'Requested resource VCPU expected positive integer amount.' - - 'Got: fred.' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing a resources filter with negative amount - GET: /resource_providers?resources=VCPU:-2 - status: 400 - response_strings: - - 'Requested resource VCPU requires amount >= 1.' - - 'Got: -2.' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing a resource class not existing - GET: /resource_providers?resources=MYMISSINGCLASS:1 - status: 400 - response_strings: - - 'Invalid resource class in resources parameter' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing a bad trailing comma - GET: /resource_providers?resources=DISK_GB:500, - status: 400 - response_strings: - - 'Badly formed resources parameter. Expected resources query string parameter in form:' - # NOTE(mriedem): The value is empty because splitting on the trailing - # comma results in an empty string. - - 'Got: .' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing empty resources - GET: /resource_providers?resources= - status: 400 - response_strings: - - Badly formed resources parameter. Expected resources query string parameter in form - - 'Got: empty string.' 
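The no-match cases that follow read directly off the fixture's DISK_GB inventory: a request must fall between min_unit and max_unit, land on a step_size boundary, and fit within (total - reserved) * allocation_ratio minus what is already allocated. A runnable sketch of that per-inventory predicate (illustrative Python; the default values are assumptions approximating the AllocationFixture, not placement's actual implementation):

    def satisfies(amount, total, reserved=0, min_unit=10, max_unit=1000,
                  step_size=10, allocation_ratio=1.0, used=0):
        # Unit constraints first, then the capacity check placement uses:
        # capacity = (total - reserved) * allocation_ratio.
        if amount < min_unit or amount > max_unit:
            return False
        if amount % step_size:
            return False
        return used + amount <= (total - reserved) * allocation_ratio

    assert satisfies(500, total=2048)                  # DISK_GB:500 matches
    assert not satisfies(1, total=2048)                # below min_unit
    assert not satisfies(1010, total=2048)             # above max_unit
    assert not satisfies(11, total=2048)               # off the step_size
    assert not satisfies(102400, total=2048,
                         max_unit=102400)              # exceeds capacity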
- -- name: list resource providers providing disk resources - GET: /resource_providers?resources=DISK_GB:500 - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - -- name: list resource providers providing disk and vcpu resources - GET: /resource_providers?resources=DISK_GB:500,VCPU:2 - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - -- name: list resource providers providing resources (no match - less than min_unit) - GET: /resource_providers?resources=DISK_GB:1 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list resource providers providing resources (no match - more than max_unit) - GET: /resource_providers?resources=DISK_GB:1010 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list resource providers providing resources (no match - not enough inventory) - GET: /resource_providers?resources=DISK_GB:102400 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list resource providers providing resources (no match - bad step size) - GET: /resource_providers?resources=DISK_GB:11 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list resource providers providing resources (no match - no inventory of resource) - GET: /resource_providers?resources=MEMORY_MB:10240 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list resource providers providing resources (no match - not enough VCPU) - GET: /resource_providers?resources=DISK_GB:500,VCPU:4 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: associate an aggregate with rp1 - PUT: /resource_providers/$ENVIRON['RP_UUID']/aggregates - data: - aggregates: - - 83a3d69d-8920-48e2-8914-cadfd8fa2f91 - resource_provider_generation: $HISTORY['list resource providers providing disk and vcpu resources'].$RESPONSE['$.resource_providers[0].generation'] - status: 200 - -- name: get by aggregates with resources - GET: '/resource_providers?member_of=in:83a3d69d-8920-48e2-8914-cadfd8fa2f91&resources=VCPU:2' - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] diff --git a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider.yaml b/nova/tests/functional/api/openstack/placement/gabbits/resource-provider.yaml deleted file mode 100644 index c550e80dca4e..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/resource-provider.yaml +++ /dev/null @@ -1,775 +0,0 @@ - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: what is at resource providers - GET: /resource_providers - request_headers: - # microversion 1.15 for cache headers - openstack-api-version: placement 1.15 - response_json_paths: - $.resource_providers: [] - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: non admin forbidden - GET: /resource_providers - request_headers: - x-auth-token: user - accept: application/json - status: 403 - response_json_paths: - $.errors[0].title: Forbidden - -- name: route not found non json - GET: /moo - request_headers: - accept: text/plain - status: 404 - response_strings: - - The resource could not be found - -- name: post new resource provider - old microversion - POST: /resource_providers - request_headers: - content-type: application/json - openstack-api-version: placement 1.19 - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - response_forbidden_headers: - - content-type - -- name: delete it - DELETE: $LOCATION - status: 204 - -- name: post new resource provider - new microversion - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - $.name: $ENVIRON['RP_NAME'] - $.parent_provider_uuid: null - $.generation: 0 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - -# On this one, don't test for which field was a duplicate because -# that depends on how the database reports columns. -- name: try to create same all again - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 409 - response_strings: - - Conflicting resource provider - response_json_paths: - $.errors[0].title: Conflict - -- name: try to create same name again - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: ada30fb5-566d-4fe1-b43b-28a9e988790c - status: 409 - response_strings: - - "Conflicting resource provider name: $ENVIRON['RP_NAME'] already exists" - response_json_paths: - $.errors[0].title: Conflict - $.errors[0].code: placement.duplicate_name - -- name: confirm the correct post - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.15 - response_headers: - content-type: application/json - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - $.name: $ENVIRON['RP_NAME'] - $.parent_provider_uuid: null - $.generation: 0 - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - -- name: get resource provider works with no accept - GET: /resource_providers/$ENVIRON['RP_UUID'] - response_headers: - content-type: /application/json/ - response_json_paths: - $.uuid: $ENVIRON['RP_UUID'] - -- name: get non-existing resource provider - GET: /resource_providers/d67370b5-4dc0-470d-a4fa-85e8e89abc6c - status: 404 - response_strings: - - No resource provider with uuid d67370b5-4dc0-470d-a4fa-85e8e89abc6c found - response_json_paths: - $.errors[0].title: Not Found - -- name: list one resource providers - GET: /resource_providers - request_headers: - openstack-api-version: placement 1.15 - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - $.resource_providers[0].name: $ENVIRON['RP_NAME'] - $.resource_providers[0].generation: 0 - $.resource_providers[0].parent_provider_uuid: null - $.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: filter out all resource providers by name - GET: /resource_providers?name=flubblebubble - response_json_paths: - $.resource_providers.`len`: 0 - -- name: filter out all resource providers by uuid - GET: /resource_providers?uuid=d67370b5-4dc0-470d-a4fa-85e8e89abc6c - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list one resource provider filtering by name - GET: /resource_providers?name=$ENVIRON['RP_NAME'] - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - $.resource_providers[0].name: $ENVIRON['RP_NAME'] - $.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - -- name: list resource providers filtering by invalid uuid - GET: /resource_providers?uuid=spameggs - status: 400 - response_strings: - - 'Invalid query string parameters' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list resource providers providing an invalid filter - GET: /resource_providers?spam=eggs - status: 400 - response_strings: - - 'Invalid query string parameters' - response_json_paths: - $.errors[0].title: Bad Request - -- name: list one resource provider filtering by uuid with allocations link - GET: /resource_providers?uuid=$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.11 - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - $.resource_providers[0].name: $ENVIRON['RP_NAME'] - $.resource_providers[0].links.`len`: 6 - $.resource_providers[0].links[?rel = "self"].href: 
/resource_providers/$ENVIRON['RP_UUID'] - $.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - $.resource_providers[0].links[?rel = "allocations"].href: /resource_providers/$ENVIRON['RP_UUID']/allocations - -- name: list one resource provider filtering by uuid no allocations link - GET: /resource_providers?uuid=$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.10 - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[0].uuid: $ENVIRON['RP_UUID'] - $.resource_providers[0].name: $ENVIRON['RP_NAME'] - $.resource_providers[0].links.`len`: 5 - $.resource_providers[0].links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - $.resource_providers[0].links[?rel = "inventories"].href: /resource_providers/$ENVIRON['RP_UUID']/inventories - $.resource_providers[0].links[?rel = "usages"].href: /resource_providers/$ENVIRON['RP_UUID']/usages - -- name: update a resource provider's name - PUT: /resource_providers/$RESPONSE['$.resource_providers[0].uuid'] - request_headers: - content-type: application/json - openstack-api-version: placement 1.15 - data: - name: new name - status: 200 - response_headers: - content-type: /application/json/ - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - response_forbidden_headers: - - location - response_json_paths: - $.generation: 0 - $.name: new name - $.uuid: $ENVIRON['RP_UUID'] - $.links[?rel = "self"].href: /resource_providers/$ENVIRON['RP_UUID'] - -- name: check the name from that update - GET: $LAST_URL - response_json_paths: - $.name: new name - -- name: update a provider poorly - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - badfield: new name - status: 400 - response_strings: - - 'JSON does not validate' - response_json_paths: - $.errors[0].title: Bad Request - -# This section of tests validates nested resource provider relationships and -# constraints. We first confirm that older microversions reject nested -# providers, that the root provider UUID cannot be set directly, and that a -# provider cannot be its own parent. We then attempt to set the parent -# provider UUID for the primary resource provider to a UUID value of a -# provider we have not yet created and expect a failure. Finally, we create -# that parent provider record, set it as the primary provider's parent, and -# verify success, with the root provider UUID derived automatically.
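The "fail trying to set a root provider UUID" case below pins down that root_provider_uuid is derived, never writable: it is simply the top of the parent chain. A toy model of that derivation (plain illustrative Python, not placement's code):

    providers = {
        'parent': {'parent_provider_uuid': None},
        'child': {'parent_provider_uuid': 'parent'},
    }

    def effective_root(uuid):
        # Walk parent links until a provider with no parent is found.
        while providers[uuid]['parent_provider_uuid'] is not None:
            uuid = providers[uuid]['parent_provider_uuid']
        return uuid

    assert effective_root('child') == 'parent'
    assert effective_root('parent') == 'parent'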
- -- name: test POST microversion limits nested providers - POST: /resource_providers - request_headers: - openstack-api-version: placement 1.13 - content-type: application/json - data: - name: child - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 'JSON does not validate' - -- name: test PUT microversion limits nested providers - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - openstack-api-version: placement 1.13 - content-type: application/json - data: - name: child - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 'JSON does not validate' - -- name: fail trying to set a root provider UUID - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - data: - root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 'JSON does not validate' - -- name: fail trying to self-parent - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: child - uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 'parent provider UUID cannot be same as UUID' - - 'Unable to create resource provider \"child\", $ENVIRON["ALT_PARENT_PROVIDER_UUID"]:' - -- name: update a parent provider UUID to non-existing provider - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - data: - name: parent - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 'parent provider UUID does not exist' - -- name: now create the parent provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: parent - uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 200 - response_json_paths: - $.uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - $.name: parent - $.parent_provider_uuid: null - $.generation: 0 - -- name: get provider with old microversion no root provider UUID field - GET: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID'] - request_headers: - openstack-api-version: placement 1.13 - content-type: application/json - response_json_paths: - $.`len`: 4 - name: parent - status: 200 - -- name: get provider has root provider UUID field - GET: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID'] - request_headers: - content-type: application/json - response_json_paths: - $.`len`: 6 - name: parent - root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - parent_provider_uuid: null - status: 200 - -- name: update a parent - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - data: - name: child - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 200 - -- name: get provider has new parent and root provider UUID field - GET: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - response_json_paths: - name: child - root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 200 - -- name: fail trying to un-parent - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - data: - name: child - parent_provider_uuid: null - status: 400 - response_strings: - - 'un-parenting a provider is not currently allowed' - -- name: 409 conflict while trying to delete parent with existing child - DELETE: 
/resource_providers/$ENVIRON['PARENT_PROVIDER_UUID'] - status: 409 - response_strings: - - "Unable to delete parent resource provider - $ENVIRON['PARENT_PROVIDER_UUID']: It has child resource providers." - response_json_paths: - $.errors[0].code: placement.resource_provider.cannot_delete_parent - -- name: list all resource providers in a tree that does not exist - GET: /resource_providers?in_tree=$ENVIRON['ALT_PARENT_PROVIDER_UUID'] - response_json_paths: - $.resource_providers.`len`: 0 - -- name: list all resource providers in a tree with multiple providers in tree - GET: /resource_providers?in_tree=$ENVIRON['RP_UUID'] - response_json_paths: - $.resource_providers.`len`: 2 - # Verify that we have both the parent and child in the list - $.resource_providers[?uuid="$ENVIRON['PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - -- name: create a new parent provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: altwparent - uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - status: 200 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - response_json_paths: - $.uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.name: altwparent - -- name: list all resource providers in a tree - GET: /resource_providers?in_tree=$ENVIRON['ALT_PARENT_PROVIDER_UUID'] - response_json_paths: - $.resource_providers.`len`: 1 - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - -- name: filter providers by traits none of them have - GET: /resource_providers?required=HW_CPU_X86_SGX,HW_CPU_X86_SHA - response_json_paths: - $.resource_providers.`len`: 0 - -- name: add traits to a provider - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - data: - resource_provider_generation: 0 - traits: ['HW_CPU_X86_SGX', 'STORAGE_DISK_SSD'] - -- name: add traits to another provider - PUT: /resource_providers/$ENVIRON['ALT_PARENT_PROVIDER_UUID']/traits - request_headers: - content-type: application/json - data: - resource_provider_generation: 0 - traits: ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD'] - -- name: filter providers with multiple traits where no provider has all of them - GET: /resource_providers?required=HW_CPU_X86_SGX,MISC_SHARES_VIA_AGGREGATE - response_json_paths: - $.resource_providers.`len`: 0 - -- name: filter providers with a trait some of them have - GET: /resource_providers?required=STORAGE_DISK_SSD - response_json_paths: - $.resource_providers.`len`: 2 - # Don't really care about the root UUID - just validating that the providers present are the ones we expected - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - -- name: list providers with 'required' parameter filters cumulatively with in_tree - GET: /resource_providers?required=STORAGE_DISK_SSD&in_tree=$ENVIRON['RP_UUID'] - response_json_paths: - $.resource_providers.`len`: 1 - # Only RP_UUID satisfies both the tree and trait constraint - $.resource_providers[?uuid="$ENVIRON['RP_UUID']"].root_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - -- name: list providers for full count - GET: /resource_providers - response_json_paths: - $.resource_providers.`len`: 3 - -- 
name: list providers forbidden 1.22 - GET: /resource_providers?required=!STORAGE_DISK_SSD - response_json_paths: - $.resource_providers.`len`: 1 - -- name: confirm forbidden trait not there - GET: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']/traits - response_json_paths: - $.traits: [] - -- name: list providers forbidden 1.21 - GET: /resource_providers?required=!STORAGE_DISK_SSD - request_headers: - openstack-api-version: placement 1.21 - status: 400 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC. Got: !STORAGE_DISK_SSD" - -- name: list providers forbidden again - GET: /resource_providers?required=!MISC_SHARES_VIA_AGGREGATE - response_json_paths: - $.resource_providers.`len`: 2 - -- name: mixed required and forbidden - GET: /resource_providers?required=!HW_CPU_X86_SGX,STORAGE_DISK_SSD - response_json_paths: - $.resource_providers.`len`: 1 - -- name: confirm mixed required and forbidden - GET: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']/traits - response_json_paths: - $.traits.`sorted`: ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_SSD'] - -- name: multiple forbidden - GET: /resource_providers?required=!MISC_SHARES_VIA_AGGREGATE,!HW_CPU_X86_SGX - response_json_paths: - $.resource_providers.`len`: 1 - -- name: confirm multiple forbidden - GET: /resource_providers/$RESPONSE['$.resource_providers[0].uuid']/traits - response_json_paths: - $.traits: [] - -- name: forbidden no apply - GET: /resource_providers?required=!HW_CPU_X86_VMX - response_json_paths: - $.resource_providers.`len`: 3 - -- name: create some inventory - PUT: /resource_providers/$ENVIRON['ALT_PARENT_PROVIDER_UUID']/inventories - request_headers: - content-type: application/json - data: - resource_provider_generation: 1 - inventories: - IPV4_ADDRESS: - total: 253 - DISK_GB: - total: 1024 - status: 200 - response_json_paths: - $.resource_provider_generation: 2 - $.inventories.IPV4_ADDRESS.total: 253 - $.inventories.IPV4_ADDRESS.reserved: 0 - $.inventories.DISK_GB.total: 1024 - $.inventories.DISK_GB.allocation_ratio: 1.0 - -- name: list providers with 'required' parameter filters cumulatively with resources - GET: /resource_providers?required=STORAGE_DISK_SSD&resources=IPV4_ADDRESS:10 - response_json_paths: - $.resource_providers.`len`: 1 - # Only ALT_PARENT_PROVIDER_UUID satisfies both the trait and resource constraints - $.resource_providers[?uuid="$ENVIRON['ALT_PARENT_PROVIDER_UUID']"].root_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - -- name: invalid 'required' parameter - blank - GET: /resource_providers?required= - status: 400 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,!CUSTOM_MAGIC." - response_json_paths: - $.errors[0].title: Bad Request - -- name: invalid 'required' parameter 1.21 - GET: /resource_providers?required= - request_headers: - openstack-api-version: placement 1.21 - status: 400 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC." - response_json_paths: - $.errors[0].title: Bad Request - -- name: invalid 'required' parameter - contains an empty trait name - GET: /resource_providers?required=STORAGE_DISK_SSD,,MISC_SHARES_VIA_AGGREGATE - status: 400 - response_strings: - - "Invalid query string parameters: Expected 'required' parameter value of the form: HW_CPU_X86_VMX,!CUSTOM_MAGIC."
- response_json_paths: - $.errors[0].title: Bad Request - -- name: invalid 'required' parameter - contains a nonexistent trait - GET: /resource_providers?required=STORAGE_DISK_SSD,BOGUS_TRAIT,MISC_SHARES_VIA_AGGREGATE - status: 400 - response_strings: - - "No such trait(s): BOGUS_TRAIT." - response_json_paths: - $.errors[0].title: Bad Request - -- name: schema validation fails with 'required' parameter on old microversion - request_headers: - openstack-api-version: placement 1.17 - GET: /resource_providers?required=HW_CPU_X86_SGX,MISC_SHARES_VIA_AGGREGATE - status: 400 - response_strings: - - Additional properties are not allowed - response_json_paths: - $.errors[0].title: Bad Request - -- name: fail trying to re-parent to a different provider - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - data: - name: child - parent_provider_uuid: $ENVIRON['ALT_PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 're-parenting a provider is not currently allowed' - -- name: create a new provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: cow - status: 200 - -- name: try to rename that provider to existing name - PUT: $LOCATION - request_headers: - content-type: application/json - data: - name: child - status: 409 - response_json_paths: - $.errors[0].title: Conflict - $.errors[0].code: placement.duplicate_name - -- name: fail to put that provider with uuid - PUT: $LAST_URL - request_headers: - content-type: application/json - data: - name: second new name - uuid: 7d4275fc-8b40-4995-85e2-74fcec2cb3b6 - status: 400 - response_strings: - - Additional properties are not allowed - response_json_paths: - $.errors[0].title: Bad Request - -- name: delete resource provider - DELETE: $LAST_URL - status: 204 - -- name: 404 on deleted provider - DELETE: $LAST_URL - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: fail to get a provider - GET: /resource_providers/random_sauce - status: 404 - response_json_paths: - $.errors[0].title: Not Found - -- name: delete non-existing resource provider - DELETE: /resource_providers/d67370b5-4dc0-470d-a4fa-85e8e89abc6c - status: 404 - response_strings: - - No resource provider with uuid d67370b5-4dc0-470d-a4fa-85e8e89abc6c found for delete - response_json_paths: - $.errors[0].title: Not Found - -- name: post resource provider no uuid - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: a name - status: 200 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - -- name: post malformed json as json - POST: /resource_providers - request_headers: - content-type: application/json - data: '{"foo": }' - status: 400 - response_strings: - - 'Malformed JSON:' - response_json_paths: - $.errors[0].title: Bad Request - -- name: post bad uuid in resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: my bad rp - uuid: this is not a uuid - status: 400 - response_strings: - - "Failed validating 'format'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: try to create resource provider with name exceed max characters - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: &name_exceeds_max_length_check This is a long text of 201 
characterssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss - status: 400 - response_strings: - - "Failed validating 'maxLength'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: try to update resource provider with name exceed max characters - PUT: /resource_providers/$ENVIRON['RP_UUID'] - request_headers: - content-type: application/json - data: - name: *name_exceeds_max_length_check - status: 400 - response_strings: - - "Failed validating 'maxLength'" - response_json_paths: - $.errors[0].title: Bad Request - -- name: confirm no cache-control headers before 1.15 - GET: /resource_providers - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - cache-control - - last-modified - -- name: fail updating a parent to itself - PUT: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID'] - request_headers: - content-type: application/json - data: - name: parent - parent_provider_uuid: $ENVIRON['PARENT_PROVIDER_UUID'] - status: 400 - response_strings: - - 'creating loop in the provider tree is not allowed.' - -- name: fail updating the parent to point to its child - PUT: /resource_providers/$ENVIRON['PARENT_PROVIDER_UUID'] - request_headers: - content-type: application/json - data: - name: parent - parent_provider_uuid: $ENVIRON['RP_UUID'] - status: 400 - response_strings: - - 'creating loop in the provider tree is not allowed.' - -- name: create a resource provider with dashed uuid - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: rp with dashed uuid - uuid: 2290d4af-9e6e-400b-9d65-1ee01376f71a - status: 200 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - -- name: try to create with the same uuid but without dashes - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: rp with dashless uuid - uuid: 2290d4af9e6e400b9d651ee01376f71a - status: 409 - response_strings: - - "Conflicting resource provider uuid: 2290d4af-9e6e-400b-9d65-1ee01376f71a already exists" - response_json_paths: - $.errors[0].title: Conflict diff --git a/nova/tests/functional/api/openstack/placement/gabbits/shared-resources.yaml b/nova/tests/functional/api/openstack/placement/gabbits/shared-resources.yaml deleted file mode 100644 index 29c22e3d54d9..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/shared-resources.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Create a shared resource provider that shares a custom resource -# class with a compute node and confirm that it is returned when -# requesting resources. -# -# NOTE(cdent): raw uuids are used here instead of environment variables as -# there's no need to query on them or change them, but something has to be -# there.
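A rough mental model of the aggregate-sharing rule this file exercises may help before reading the tests. The sketch below is an illustration under simplified assumptions, not placement's actual candidate-generation code: a provider carrying the MISC_SHARES_VIA_AGGREGATE trait contributes its inventory to any provider it shares an aggregate with, and that is the difference between /resource_providers and /allocation_candidates:

    # Toy model of aggregate sharing; an illustration only, not the
    # real placement implementation.
    def satisfies_alone(provider, request):
        # /resource_providers semantics: the provider must cover the
        # whole request from its own inventory.
        return all(provider['inventory'].get(rc, 0) >= amount
                   for rc, amount in request.items())

    def satisfies_with_sharing(provider, providers, request):
        # /allocation_candidates semantics: sharing providers in a
        # common aggregate may supply the missing resource classes.
        pool = dict(provider['inventory'])
        for other in providers:
            if other is provider:
                continue
            if ('MISC_SHARES_VIA_AGGREGATE' in other['traits'] and
                    set(other['aggregates']) & set(provider['aggregates'])):
                for rc, total in other['inventory'].items():
                    pool[rc] = pool.get(rc, 0) + total
        return all(pool.get(rc, 0) >= amount
                   for rc, amount in request.items())

    cn1 = {'inventory': {'VCPU': 20, 'MEMORY_MB': 100000},
           'aggregates': ['agg'], 'traits': []}
    share = {'inventory': {'CUSTOM_MAGIC': 5},
             'aggregates': ['agg'],
             'traits': ['MISC_SHARES_VIA_AGGREGATE']}
    request = {'VCPU': 1, 'CUSTOM_MAGIC': 1}

    print(satisfies_alone(cn1, request))                       # False
    print(satisfies_with_sharing(cn1, [cn1, share], request))  # True

This mirrors the last two tests in the file: the /resource_providers query stays empty, while /allocation_candidates returns one allocation request spanning both providers.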
- -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - content-type: application/json - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: create compute node 1 - POST: /resource_providers - data: - name: cn1 - uuid: 8d830468-6395-46b0-b56a-f934a1d60bbe - status: 200 - -- name: cn1 inventory - PUT: /resource_providers/8d830468-6395-46b0-b56a-f934a1d60bbe/inventories - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 20 - MEMORY_MB: - total: 100000 - status: 200 - -- name: create compute node 2 - POST: /resource_providers - data: - name: cn2 - uuid: ed6ea55d-01ce-4e11-ba97-13a4e5540b3e - status: 200 - -- name: cn2 inventory - PUT: /resource_providers/ed6ea55d-01ce-4e11-ba97-13a4e5540b3e/inventories - data: - resource_provider_generation: 0 - inventories: - VCPU: - total: 20 - MEMORY_MB: - total: 100000 - DISK_GB: - total: 100000 - status: 200 - -- name: create custom magic - PUT: /resource_classes/CUSTOM_MAGIC - status: 201 - -- name: create shared 1 - POST: /resource_providers - data: - uuid: d450bd39-3b01-4355-9ea1-594f96594cf1 - name: custom magic share - status: 200 - -- name: shared 1 inventory - PUT: /resource_providers/d450bd39-3b01-4355-9ea1-594f96594cf1/inventories - data: - resource_provider_generation: 0 - inventories: - CUSTOM_MAGIC: - total: 5 - status: 200 - -# no aggregate association -- name: get resources no agg - GET: /resource_providers?resources=VCPU:1,CUSTOM_MAGIC:1 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: get allocation candidates no agg - desc: this sometimes fails - GET: /allocation_candidates?resources=VCPU:1,CUSTOM_MAGIC:1 - response_json_paths: - $.allocation_requests.`len`: 0 - $.provider_summaries.`len`: 0 - -- name: aggregate shared - PUT: /resource_providers/d450bd39-3b01-4355-9ea1-594f96594cf1/aggregates - data: - aggregates: - - f3dc0f36-97d4-4daf-be0c-d71466da9c85 - resource_provider_generation: 1 - -- name: aggregate cn1 - PUT: /resource_providers/8d830468-6395-46b0-b56a-f934a1d60bbe/aggregates - data: - aggregates: - - f3dc0f36-97d4-4daf-be0c-d71466da9c85 - resource_provider_generation: 1 - -# no shared trait -- name: get resources no shared - GET: /resource_providers?resources=VCPU:1,CUSTOM_MAGIC:1 - response_json_paths: - $.resource_providers.`len`: 0 - -- name: get allocation candidates no shared - GET: /allocation_candidates?resources=VCPU:1,CUSTOM_MAGIC:1 - response_json_paths: - $.allocation_requests.`len`: 0 - $.provider_summaries.`len`: 0 - -- name: set trait shared - PUT: /resource_providers/d450bd39-3b01-4355-9ea1-594f96594cf1/traits - data: - resource_provider_generation: 2 - traits: - - MISC_SHARES_VIA_AGGREGATE - -# this should be zero because we only expect those resource providers which -# can fully satisfy the resources query themselves when making requests of -# /resource_providers. This may change in the future depending on use -# cases. This test and the next demonstrate and confirm that -# /resource_providers and /allocation_candidates have different behaviors. -- name: get resources shared - GET: /resource_providers?resources=VCPU:1,CUSTOM_MAGIC:1 - response_json_paths: - $.resource_providers.`len`: 0 - -# this is one allocation request and two resource providers because -# at /allocation_candidates we expect those resource providers which -# can either fully satisfy the resources query or can do so with the -# assistance of a sharing provider.
-- name: get allocation candidates shared - GET: /allocation_candidates?resources=VCPU:1,CUSTOM_MAGIC:1 - response_json_paths: - $.allocation_requests.`len`: 1 - $.provider_summaries.`len`: 2 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/traits-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/traits-policy.yaml deleted file mode 100644 index 03cc1544406c..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/traits-policy.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# This tests the individual CRUD operations on -# /traits* and /resource_providers/{uuid}/traits using a non-admin user with an -# open policy configuration. The response validation is intentionally minimal. -fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - content-type: application/json - openstack-api-version: placement latest - -tests: - -- name: list traits - GET: /traits - status: 200 - -- name: create a trait - PUT: /traits/CUSTOM_TRAIT_X - status: 201 - -- name: show trait - GET: /traits/CUSTOM_TRAIT_X - status: 204 - -- name: create resource provider - POST: /resource_providers - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - -- name: list resource provider traits - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - status: 200 - -- name: update resource provider traits - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 200 - data: - traits: - - CUSTOM_TRAIT_X - resource_provider_generation: 0 - -- name: delete resource provider traits - DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits - status: 204 - -- name: delete trait - DELETE: /traits/CUSTOM_TRAIT_X - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml b/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml deleted file mode 100644 index a5010d2cb4d4..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/traits.yaml +++ /dev/null @@ -1,487 +0,0 @@ - -fixtures: - - APIFixture - -defaults: - request_headers: - x-auth-token: admin - # traits introduced in 1.6 - openstack-api-version: placement 1.6 - -tests: - -- name: create a trait without custom namespace - PUT: /traits/TRAIT_X - status: 400 - response_strings: - - 'The trait is invalid. A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"' - -- name: create a trait with invalid characters - PUT: /traits/CUSTOM_ABC:1 - status: 400 - response_strings: - - 'The trait is invalid. A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"' - -- name: create a trait with name exceed max characters - PUT: /traits/CUSTOM_ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNO - status: 400 - response_strings: - - 'The trait is invalid. 
A valid trait must be no longer than 255 characters, start with the prefix \"CUSTOM_\" and use following characters: \"A\"-\"Z\", \"0\"-\"9\" and \"_\"' - -- name: create a trait earlier version - PUT: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: create a trait - PUT: /traits/CUSTOM_TRAIT_1 - status: 201 - response_headers: - location: //traits/CUSTOM_TRAIT_1/ - response_forbidden_headers: - - content-type - # PUT in 1.6 version should not have cache headers - - cache-control - - last-modified - -- name: create a trait which existed - PUT: /traits/CUSTOM_TRAIT_1 - status: 204 - response_headers: - location: //traits/CUSTOM_TRAIT_1/ - response_forbidden_headers: - - content-type - -- name: get a trait earlier version - GET: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: get a trait - GET: /traits/CUSTOM_TRAIT_1 - status: 204 - response_forbidden_headers: - - content-type - # In early versions cache headers should not be present - - cache-control - - last-modified - -- name: get a non-existed trait - GET: /traits/NON_EXISTED - status: 404 - -- name: delete a trait earlier version - DELETE: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: delete a trait - DELETE: /traits/CUSTOM_TRAIT_1 - status: 204 - response_forbidden_headers: - - content-type - # DELETE in any version should not have cache headers - - cache-control - - last-modified - -- name: delete a non-existed trait - DELETE: /traits/CUSTOM_NON_EXSITED - status: 404 - -- name: try to delete standard trait - DELETE: /traits/HW_CPU_X86_SSE - status: 400 - response_strings: - - Cannot delete standard trait - -- name: create CUSTOM_TRAIT_1 - PUT: /traits/CUSTOM_TRAIT_1 - status: 201 - response_headers: - location: //traits/CUSTOM_TRAIT_1/ - response_forbidden_headers: - - content-type - -- name: create CUSTOM_TRAIT_2 - PUT: /traits/CUSTOM_TRAIT_2 - status: 201 - response_headers: - location: //traits/CUSTOM_TRAIT_2/ - response_forbidden_headers: - - content-type - -# NOTE(cdent): This simply tests that traits we know should be -# present are in the results. We can't check length here because -# the standard traits, which will grow over time, are present. -- name: list traits - GET: /traits - status: 200 - response_strings: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - - MISC_SHARES_VIA_AGGREGATE - - HW_CPU_X86_SHA - -- name: list traits earlier version - GET: /traits - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: list traits with invalid format of name parameter - GET: /traits?name=in_abc - status: 400 - response_strings: - - 'Badly formatted name parameter. Expected name query string parameter in form: ?name=[in|startswith]:[name1,name2|prefix]. 
Got: \"in_abc\"' - -- name: list traits with name=in filter - GET: /traits?name=in:CUSTOM_TRAIT_1,CUSTOM_TRAIT_2 - status: 200 - response_json_paths: - $.traits.`len`: 2 - response_strings: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - -- name: create CUSTOM_ANOTHER_TRAIT - PUT: /traits/CUSTOM_ANOTHER_TRAIT - status: 201 - response_headers: - location: //traits/CUSTOM_ANOTHER_TRAIT/ - response_forbidden_headers: - - content-type - -- name: list traits with prefix - GET: /traits?name=startswith:CUSTOM_TRAIT - status: 200 - response_json_paths: - $.traits.`len`: 2 - response_strings: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - -- name: list traits with invalid parameters - GET: /traits?invalid=abc - status: 400 - response_strings: - - "Invalid query string parameters: Additional properties are not allowed" - -- name: list traits 1.14 no cache headers - GET: /traits - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - cache-control - - last-modified - -- name: list traits 1.15 has cache headers - GET: /traits - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: get trait 1.14 no cache headers - GET: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.14 - status: 204 - response_forbidden_headers: - - cache-control - - last-modified - -- name: get trait 1.15 has cache headers - GET: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.15 - status: 204 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: put trait 1.14 no cache headers - PUT: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.14 - status: 204 - response_forbidden_headers: - - cache-control - - last-modified - -- name: put trait 1.15 has cache headers - PUT: /traits/CUSTOM_TRAIT_1 - request_headers: - openstack-api-version: placement 1.15 - status: 204 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: post new resource provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 201 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - response_forbidden_headers: - - content-type - -- name: list traits for resource provider earlier version - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: list traits for resource provider without traits - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - status: 200 - response_json_paths: - $.resource_provider_generation: 0 - $.traits.`len`: 0 - response_forbidden_headers: - # In 1.6 no cache headers - - cache-control - - last-modified - -- name: set traits for resource provider earlier version - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - openstack-api-version: placement 1.5 - status: 404 - -- name: set traits for resource provider - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 200 - data: - traits: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - resource_provider_generation: 0 - response_json_paths: - $.resource_provider_generation: 1 - $.traits.`len`: 2 - response_strings: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - response_forbidden_headers: - # In 1.6 no cache headers - - cache-control - - last-modified - -- name: get associated traits - GET: /traits?associated=true - status: 200 - response_json_paths: - $.traits.`len`: 2 - response_strings: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - -- name: get associated traits with invalid value - GET: /traits?associated=xyz - status: 400 - response_strings: - - 'The query parameter \"associated\" only accepts \"true\" or \"false\"' - -- name: set traits for resource provider without resource provider generation - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 400 - data: - traits: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - response_strings: - - "'resource_provider_generation' is a required property" - -- name: set traits for resource provider with invalid resource provider generation - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 400 - data: - traits: - - CUSTOM_TRAIT_1 - resource_provider_generation: invalid_generation - response_strings: - - "'invalid_generation' is not of type 'integer'" - -- name: set traits for resource provider with conflict generation - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - openstack-api-version: placement 1.23 - status: 409 - data: - traits: - - CUSTOM_TRAIT_1 - resource_provider_generation: 5 - response_strings: - - Resource provider's generation already changed. Please update the generation and try again. 
- response_json_paths: - $.errors[0].code: placement.concurrent_update - -- name: set non existed traits for resource provider - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 400 - data: - traits: - - NON_EXISTED_TRAIT1 - - NON_EXISTED_TRAIT2 - - CUSTOM_TRAIT_1 - resource_provider_generation: 1 - response_strings: - - No such trait - - NON_EXISTED_TRAIT1 - - NON_EXISTED_TRAIT2 - -- name: set traits for resource provider with invalid type of traits - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 400 - data: - traits: invalid_type - resource_provider_generation: 1 - response_strings: - - "'invalid_type' is not of type 'array'" - -- name: set traits for resource provider with additional properties - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - content-type: application/json - status: 400 - data: - traits: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - resource_provider_generation: 1 - additional: additional - response_strings: - - 'Additional properties are not allowed' - -- name: set traits for non_existed resource provider - PUT: /resource_providers/non_existed/traits - request_headers: - content-type: application/json - data: - traits: - - CUSTOM_TRAIT_1 - resource_provider_generation: 1 - status: 404 - response_strings: - - No resource provider with uuid non_existed found - -- name: list traits for resource provider - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - status: 200 - response_json_paths: - $.resource_provider_generation: 1 - $.traits.`len`: 2 - response_strings: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - -- name: delete an in-use trait - DELETE: /traits/CUSTOM_TRAIT_1 - status: 409 - response_strings: - - The trait CUSTOM_TRAIT_1 is in use by a resource provider. - -- name: list traits for non_existed resource provider - GET: /resource_providers/non_existed/traits - request_headers: - content-type: application/json - status: 404 - response_strings: - - No resource provider with uuid non_existed found - -- name: delete traits for resource provider earlier version - DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - openstack-api-version: placement 1.5 - status: 404 - -- name: delete traits for resource provider - DELETE: /resource_providers/$ENVIRON['RP_UUID']/traits - status: 204 - response_forbidden_headers: - - content-type - -- name: delete traits for non_existed resource provider - DELETE: /resource_providers/non_existed/traits - status: 404 - response_strings: - - No resource provider with uuid non_existed found - -- name: empty traits for resource provider 1.15 has cache headers - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? 
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: update rp trait 1.14 no cache headers - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - data: - traits: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - resource_provider_generation: 2 - request_headers: - openstack-api-version: placement 1.14 - content-type: application/json - response_forbidden_headers: - - cache-control - - last-modified - -- name: update rp trait 1.15 has cache headers - PUT: /resource_providers/$ENVIRON['RP_UUID']/traits - data: - traits: - - CUSTOM_TRAIT_1 - - CUSTOM_TRAIT_2 - resource_provider_generation: 3 - request_headers: - openstack-api-version: placement 1.15 - content-type: application/json - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: list traits for resource provider 1.14 no cache headers - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - openstack-api-version: placement 1.14 - response_forbidden_headers: - - cache-control - - last-modified - -- name: list traits for resource provider 1.15 has cache headers - GET: /resource_providers/$ENVIRON['RP_UUID']/traits - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ diff --git a/nova/tests/functional/api/openstack/placement/gabbits/unicode.yaml b/nova/tests/functional/api/openstack/placement/gabbits/unicode.yaml deleted file mode 100644 index 1851694e12dc..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/unicode.yaml +++ /dev/null @@ -1,40 +0,0 @@ - -fixtures: - - APIFixture - -defaults: - request_headers: - accept: application/json - x-auth-token: admin - -tests: - -- name: get an encoded snowman - desc: this should fall through to a NotFound on the resource provider object - GET: /resources_providers/%e2%98%83 - status: 404 - -- name: post resource provider with snowman - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: ☃ - uuid: $ENVIRON['RP_UUID'] - status: 201 - response_headers: - location: //resource_providers/[a-f0-9-]+/ - -- name: get that resource provider - GET: $LOCATION - response_json_paths: - $.name: ☃ - -- name: query by name - GET: /resource_providers?name=%e2%98%83 - response_json_paths: - $.resource_providers[0].name: ☃ - -- name: delete that one - DELETE: /resource_providers/$ENVIRON['RP_UUID'] - status: 204 diff --git a/nova/tests/functional/api/openstack/placement/gabbits/usage-policy.yaml b/nova/tests/functional/api/openstack/placement/gabbits/usage-policy.yaml deleted file mode 100644 index 7feb3b98da70..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/usage-policy.yaml +++ /dev/null @@ -1,33 +0,0 @@ -# This tests the individual CRUD operations on -# /resource_providers/{uuid}/usages and /usages -# using a non-admin user with an open policy configuration. The -# response validation is intentionally minimal. 
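The usage files below exercise /resource_providers/{uuid}/usages and /usages. Conceptually, the total-usages report is a per-resource-class sum of a project's (and optionally one user's) allocations; the following toy sketch is an assumption about those semantics, not placement's actual query, which is done in SQL:

    # Toy aggregation of allocations into usage totals.
    from collections import defaultdict

    allocations = [
        {'project_id': 'p1', 'user_id': 'u1',
         'resources': {'DISK_GB': 1000}},
        {'project_id': 'p1', 'user_id': 'u2',
         'resources': {'VCPU': 1, 'DISK_GB': 20}},
    ]

    def total_usages(allocs, project_id, user_id=None):
        usages = defaultdict(int)
        for alloc in allocs:
            if alloc['project_id'] != project_id:
                continue
            if user_id is not None and alloc['user_id'] != user_id:
                continue
            for rc, amount in alloc['resources'].items():
                usages[rc] += amount
        return dict(usages)

    print(total_usages(allocations, 'p1'))        # {'DISK_GB': 1020, 'VCPU': 1}
    print(total_usages(allocations, 'p1', 'u1'))  # {'DISK_GB': 1000}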
-fixtures: - - OpenPolicyFixture - -defaults: - request_headers: - x-auth-token: user - accept: application/json - openstack-api-version: placement latest - -tests: - -- name: create provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: $ENVIRON['RP_NAME'] - uuid: $ENVIRON['RP_UUID'] - status: 200 - -- name: list provider usages - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_json_paths: - usages: {} - -- name: get total usages for project - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - response_json_paths: - usages: {} diff --git a/nova/tests/functional/api/openstack/placement/gabbits/usage.yaml b/nova/tests/functional/api/openstack/placement/gabbits/usage.yaml deleted file mode 100644 index e83ea8d39b46..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/usage.yaml +++ /dev/null @@ -1,120 +0,0 @@ -# More interesting tests for usages are in with_allocations - -fixtures: - - APIFixture - -defaults: - request_headers: - accept: application/json - x-auth-token: admin - -tests: - -- name: fail to get usages for missing provider - GET: /resource_providers/fae14fa3-4b43-498c-a33c-4a1d00edb577/usages - status: 404 - response_strings: - - No resource provider with uuid fae14fa3-4b43-498c-a33c-4a1d00edb577 found - response_json_paths: - $.errors[0].title: Not Found - -- name: create provider - POST: /resource_providers - request_headers: - content-type: application/json - data: - name: a name - status: 201 - -- name: check provider exists - GET: $LOCATION - response_json_paths: - name: a name - -- name: get empty usages - GET: $LAST_URL/usages - request_headers: - content-type: application/json - response_json_paths: - usages: {} - -- name: get usages no cache headers base microversion - GET: $LAST_URL - response_forbidden_headers: - - last-modified - - cache-control - -- name: get usages cache headers 1.15 - GET: $LAST_URL - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp?
- last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ - -- name: get total usages earlier version - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - request_headers: - openstack-api-version: placement 1.8 - status: 404 - -- name: get total usages no project or user - GET: /usages - request_headers: - openstack-api-version: placement 1.9 - status: 400 - -- name: get empty usages with project id - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - request_headers: - openstack-api-version: placement 1.9 - response_json_paths: - usages: {} - -- name: get empty usages with project id and user id - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=78725f09-5c01-4c9e-97a5-98d75e1e32b1 - request_headers: - openstack-api-version: placement 1.9 - response_json_paths: - usages: {} - -- name: get total usages project_id less than min length - GET: /usages?project_id= - request_headers: - openstack-api-version: placement 1.9 - status: 400 - response_strings: - - "Failed validating 'minLength'" - -- name: get total usages user_id less than min length - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id= - request_headers: - openstack-api-version: placement 1.9 - status: 400 - response_strings: - - "Failed validating 'minLength'" - -- name: get total usages project_id exceeds max length - GET: /usages?project_id=78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1 - request_headers: - openstack-api-version: placement 1.9 - status: 400 - response_strings: - - "Failed validating 'maxLength'" - -- name: get total usages user_id exceeds max length - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1 - request_headers: - openstack-api-version: placement 1.9 - status: 400 - response_strings: - - "Failed validating 'maxLength'" - -- name: get total usages with additional param - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=78725f09-5c01-4c9e-97a5-98d75e1e32b1&dummy=1 - request_headers: - openstack-api-version: placement 1.9 - status: 400 - response_strings: - - "Additional properties are not allowed" diff --git a/nova/tests/functional/api/openstack/placement/gabbits/with-allocations.yaml b/nova/tests/functional/api/openstack/placement/gabbits/with-allocations.yaml deleted file mode 100644 index 83f68440bd79..000000000000 --- a/nova/tests/functional/api/openstack/placement/gabbits/with-allocations.yaml +++ /dev/null @@ -1,159 +0,0 @@ - -fixtures: - - AllocationFixture - -defaults: - request_headers: - x-auth-token: admin - -tests: - -- name: confirm inventories - GET: /resource_providers/$ENVIRON['RP_UUID']/inventories - response_json_paths: - $.inventories.DISK_GB.total: 2048 - $.inventories.DISK_GB.reserved: 0 - -- name: get usages - GET: /resource_providers/$ENVIRON['RP_UUID']/usages - response_headers: - # use a regex here because charset, which is not only not - # required but superfluous, is present - content-type: /application/json/ - response_json_paths: - $.resource_provider_generation: 5 - $.usages.DISK_GB: 1020 - $.usages.VCPU: 7 - -- name: get allocations - GET:
/resource_providers/$ENVIRON['RP_UUID']/allocations - response_headers: - content-type: /application/json/ - response_json_paths: - $.allocations.`len`: 3 - $.allocations["$ENVIRON['CONSUMER_0']"].resources: - DISK_GB: 1000 - $.allocations["$ENVIRON['CONSUMER_ID']"].resources: - VCPU: 6 - $.allocations["$ENVIRON['ALT_CONSUMER_ID']"].resources: - VCPU: 1 - DISK_GB: 20 - $.resource_provider_generation: 5 - -- name: fail to delete resource provider - DELETE: /resource_providers/$ENVIRON['RP_UUID'] - status: 409 - response_strings: - - "Unable to delete resource provider $ENVIRON['RP_UUID']: Resource provider has allocations." - -- name: fail to change inventory via put 1.23 - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - accept: application/json - content-type: application/json - openstack-api-version: placement 1.23 - data: - resource_provider_generation: 5 - inventories: {} - status: 409 - response_json_paths: - $.errors[0].code: placement.inventory.inuse - -- name: fail to delete all inventory - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - accept: application/json - openstack-api-version: placement 1.5 - status: 409 - response_headers: - content-type: /application/json/ - response_strings: - - "Inventory for 'VCPU, DISK_GB' on resource provider '$ENVIRON['RP_UUID']' in use" - -- name: fail to delete all inventory 1.23 - DELETE: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - accept: application/json - openstack-api-version: placement 1.23 - status: 409 - response_headers: - content-type: /application/json/ - response_strings: - - "Inventory for 'VCPU, DISK_GB' on resource provider '$ENVIRON['RP_UUID']' in use" - response_json_paths: - $.errors[0].code: placement.inventory.inuse - -# We can change inventory in a way that makes existing allocations exceed the -# new capacity. This is allowed. 
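To spell out that comment: placement computes capacity for a resource class as (total - reserved) * allocation_ratio, and an inventory update is not re-validated against existing allocations. A small sketch using the numbers from this file:

    # Capacity per placement's model: (total - reserved) * allocation_ratio.
    # Shrinking inventory is accepted even when usage already exceeds the
    # new capacity; the overcommit simply becomes visible in usages.
    def capacity(total, reserved=0, allocation_ratio=1.0):
        return (total - reserved) * allocation_ratio

    used_disk_gb = 1020  # DISK_GB usage asserted above
    new_total = 1019     # the PUT below shrinks the DISK_GB total
    print(capacity(new_total) < used_disk_gb)  # True, yet the PUT returns 200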
-- name: change inventory despite capacity exceeded - PUT: /resource_providers/$ENVIRON['RP_UUID']/inventories - request_headers: - accept: application/json - content-type: application/json - data: - resource_provider_generation: 5 - inventories: - DISK_GB: - total: 1019 - VCPU: - total: 97 - status: 200 - -- name: get total usages by project - GET: /usages?project_id=$ENVIRON['PROJECT_ID'] - request_headers: - openstack-api-version: placement 1.9 - status: 200 - response_json_paths: - $.usages.DISK_GB: 1020 - $.usages.VCPU: 7 - -- name: get total usages by project and user - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=$ENVIRON['USER_ID'] - request_headers: - openstack-api-version: placement 1.9 - status: 200 - response_json_paths: - $.usages.DISK_GB: 1000 - $.usages.VCPU: 6 - -- name: get total usages by project and alt user - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=$ENVIRON['ALT_USER_ID'] - request_headers: - openstack-api-version: placement 1.9 - status: 200 - # In pre 1.15 microversions cache headers not present - response_forbidden_headers: - - last-modified - - cache-control - response_json_paths: - $.usages.DISK_GB: 20 - $.usages.VCPU: 1 - -- name: get allocations without project and user - GET: /allocations/$ENVIRON['CONSUMER_ID'] - request_headers: - openstack-api-version: placement 1.11 - accept: application/json - response_json_paths: - # only one key in the top level object - $.`len`: 1 - -- name: get allocations with project and user - GET: /allocations/$ENVIRON['CONSUMER_ID'] - request_headers: - openstack-api-version: placement 1.12 - accept: application/json - response_json_paths: - $.project_id: $ENVIRON['PROJECT_ID'] - $.user_id: $ENVIRON['USER_ID'] - $.`len`: 3 - -- name: get total usages with cache headers - GET: /usages?project_id=$ENVIRON['PROJECT_ID']&user_id=$ENVIRON['ALT_USER_ID'] - request_headers: - openstack-api-version: placement 1.15 - response_headers: - cache-control: no-cache - # Does last-modified look like a legit timestamp? - last-modified: /^\w+, \d+ \w+ \d{4} [\d:]+ GMT$/ diff --git a/nova/tests/functional/api/openstack/placement/test_direct.py b/nova/tests/functional/api/openstack/placement/test_direct.py deleted file mode 100644 index f4e901645df0..000000000000 --- a/nova/tests/functional/api/openstack/placement/test_direct.py +++ /dev/null @@ -1,77 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from oslo_config import cfg -from oslo_utils.fixture import uuidsentinel - -from nova.api.openstack.placement import direct -from nova.tests.functional.api.openstack.placement import base - - -CONF = cfg.CONF - - -class TestDirect(base.TestCase): - - def test_direct_is_there(self): - with direct.PlacementDirect(CONF) as client: - resp = client.get('/') - self.assertTrue(resp) - data = resp.json() - self.assertEqual('v1.0', data['versions'][0]['id']) - - def test_get_resource_providers(self): - with direct.PlacementDirect(CONF) as client: - resp = client.get('/resource_providers') - self.assertTrue(resp) - data = resp.json() - self.assertEqual([], data['resource_providers']) - - def test_create_resource_provider(self): - data = {'name': 'fake'} - with direct.PlacementDirect(CONF) as client: - resp = client.post('/resource_providers', json=data) - self.assertTrue(resp) - resp = client.get('/resource_providers') - self.assertTrue(resp) - data = resp.json() - self.assertEqual(1, len(data['resource_providers'])) - - def test_json_validation_happens(self): - data = {'name': 'fake', 'cowsay': 'moo'} - with direct.PlacementDirect(CONF) as client: - resp = client.post('/resource_providers', json=data) - self.assertFalse(resp) - self.assertEqual(400, resp.status_code) - - def test_microversion_handling(self): - with direct.PlacementDirect(CONF) as client: - # create parent - parent_data = {'name': uuidsentinel.p_rp, - 'uuid': uuidsentinel.p_rp} - resp = client.post('/resource_providers', json=parent_data) - self.assertTrue(resp, resp.text) - - # attempt to create child - data = {'name': 'child', 'parent_provider_uuid': uuidsentinel.p_rp} - # no microversion, 400 - resp = client.post('/resource_providers', json=data) - self.assertFalse(resp) - self.assertEqual(400, resp.status_code) - # low microversion, 400 - resp = client.post('/resource_providers', json=data, - microversion='1.13') - self.assertFalse(resp) - self.assertEqual(400, resp.status_code) - resp = client.post('/resource_providers', json=data, - microversion='1.14') - self.assertTrue(resp, resp.text) diff --git a/nova/tests/functional/api/openstack/placement/test_placement_api.py b/nova/tests/functional/api/openstack/placement/test_placement_api.py deleted file mode 100644 index e2448908383a..000000000000 --- a/nova/tests/functional/api/openstack/placement/test_placement_api.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os -from oslotest import output - -import wsgi_intercept - -from gabbi import driver - -from nova.tests.functional.api.openstack.placement.fixtures import capture -# TODO(cdent): This whitespace blight will go away post extraction. -from nova.tests.functional.api.openstack.placement.fixtures \ - import gabbits as fixtures - -# Check that wsgi application response headers are always -# native str. 
-wsgi_intercept.STRICT_RESPONSE_HEADERS = True -TESTS_DIR = 'gabbits' - - -def load_tests(loader, tests, pattern): - """Provide a TestSuite to the discovery process.""" - test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) - # These inner fixtures provide per test request output and log - # capture, for cleaner results reporting. - inner_fixtures = [ - output.CaptureOutput, - capture.Logging, - ] - return driver.build_tests(test_dir, loader, host=None, - test_loader_name=__name__, - intercept=fixtures.setup_app, - inner_fixtures=inner_fixtures, - fixture_module=fixtures) diff --git a/nova/tests/functional/api/openstack/placement/test_verify_policy.py b/nova/tests/functional/api/openstack/placement/test_verify_policy.py deleted file mode 100644 index a6fb602f72d1..000000000000 --- a/nova/tests/functional/api/openstack/placement/test_verify_policy.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from oslo_config import cfg - -from nova.api.openstack.placement import direct -from nova.api.openstack.placement import handler -from nova.tests.functional.api.openstack.placement import base - - -CONF = cfg.CONF - - -class TestVerifyPolicy(base.TestCase): - """Verify that all defined placement routes have a policy.""" - - # Paths that don't need a policy check - EXCEPTIONS = ['/', ''] - - def _test_request_403(self, client, method, route): - headers = { - 'x-auth-token': 'user', - 'content-type': 'application/json' - } - request_method = getattr(client, method.lower()) - # We send an empty request body on all requests. Because - # policy handling comes before other processing, the value - # of the body is irrelevant. - response = request_method(route, data='', headers=headers) - self.assertEqual( - 403, response.status_code, - 'method %s on route %s is open for user, status: %s' % - (method, route, response.status_code)) - - def test_verify_policy(self): - with direct.PlacementDirect(CONF, latest_microversion=True) as client: - for route, methods in handler.ROUTE_DECLARATIONS.items(): - if route in self.EXCEPTIONS: - continue - for method in methods: - self._test_request_403(client, method, route) diff --git a/nova/tests/functional/api_paste_fixture.py b/nova/tests/functional/api_paste_fixture.py index dad800865924..5c268c73070a 100644 --- a/nova/tests/functional/api_paste_fixture.py +++ b/nova/tests/functional/api_paste_fixture.py @@ -11,6 +11,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+from __future__ import absolute_import import os diff --git a/nova/tests/functional/api_sample_tests/api_sample_base.py b/nova/tests/functional/api_sample_tests/api_sample_base.py index 394c39a8fa7a..05df35d1d820 100644 --- a/nova/tests/functional/api_sample_tests/api_sample_base.py +++ b/nova/tests/functional/api_sample_tests/api_sample_base.py @@ -107,8 +107,6 @@ class ApiSampleTestBaseV21(testscenarios.WithScenarios, if not self.SUPPORTS_CELLS: self.useFixture(fixtures.Database()) self.useFixture(fixtures.Database(database='api')) - # FIXME(cdent): Placement db already provided by IntegratedHelpers - self.useFixture(fixtures.Database(database='placement')) self.useFixture(fixtures.DefaultFlavorsFixture()) self.useFixture(fixtures.SingleCellSimple()) diff --git a/nova/tests/unit/api_samples_test_base/test_compare_result.py b/nova/tests/functional/api_sample_tests/test_compare_result.py similarity index 100% rename from nova/tests/unit/api_samples_test_base/test_compare_result.py rename to nova/tests/functional/api_sample_tests/test_compare_result.py diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py index 0cb7b8280b6e..28e7cc471753 100644 --- a/nova/tests/functional/compute/test_resource_tracker.py +++ b/nova/tests/functional/compute/test_resource_tracker.py @@ -22,7 +22,7 @@ from nova import conf from nova import context from nova import objects from nova import rc_fields as fields -from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.functional import test_report_client as test_base from nova.virt import driver as virt_driver @@ -200,7 +200,7 @@ class IronicResourceTrackerTest(test_base.SchedulerReportClientTestBase): usage for an instance, the nodes still have their unique stats and nothing is leaked from node to node. """ - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) # Before the resource tracker is "initialized", we shouldn't have # any compute nodes or stats in the RT's cache... self.assertEqual(0, len(self.rt.compute_nodes)) diff --git a/nova/tests/functional/fixtures.py b/nova/tests/functional/fixtures.py new file mode 100644 index 000000000000..5c4305d91cf5 --- /dev/null +++ b/nova/tests/functional/fixtures.py @@ -0,0 +1,150 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Fixtures solely for functional tests.""" +from __future__ import absolute_import + +import fixtures +from keystoneauth1 import adapter as ka +from keystoneauth1 import session as ks +from placement.tests.functional.fixtures import placement as placement_fixtures +from requests import adapters + +from nova.tests.functional.api import client + + +class PlacementApiClient(object): + def __init__(self, placement_fixture): + self.fixture = placement_fixture + + def get(self, url, **kwargs): + return client.APIResponse(self.fixture._fake_get(None, url, **kwargs)) + + def put(self, url, body, **kwargs): + return client.APIResponse( + self.fixture._fake_put(None, url, body, **kwargs)) + + def post(self, url, body, **kwargs): + return client.APIResponse( + self.fixture._fake_post(None, url, body, **kwargs)) + + +class PlacementFixture(placement_fixtures.PlacementFixture): + """A fixture to placement operations. + + Runs a local WSGI server bound on a free port and having the Placement + application with NoAuth middleware. + This fixture also prevents calling the ServiceCatalog for getting the + endpoint. + + It's possible to ask for a specific token when running the fixtures so + all calls would be passing this token. + + Most of the time users of this fixture will also want the placement + database fixture to be called first, so that is done automatically. If + that is not desired pass ``db=False`` when initializing the fixture + and establish the database yourself with: + + self.useFixture(placement_fixtures.Database(set_config=True)) + """ + + def setUp(self): + super(PlacementFixture, self).setUp() + + # Turn off manipulation of socket_options in TCPKeepAliveAdapter + # to keep wsgi-intercept happy. Replace it with the method + # from its superclass. + self.useFixture(fixtures.MonkeyPatch( + 'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager', + adapters.HTTPAdapter.init_poolmanager)) + + self._client = ka.Adapter(ks.Session(auth=None), raise_exc=False) + # NOTE(sbauza): We need to mock the scheduler report client because + # we need to fake Keystone by directly calling the endpoint instead + # of looking up the service catalog, like we did for the OSAPIFixture. + self.useFixture(fixtures.MonkeyPatch( + 'nova.scheduler.client.report.SchedulerReportClient.get', + self._fake_get)) + self.useFixture(fixtures.MonkeyPatch( + 'nova.scheduler.client.report.SchedulerReportClient.post', + self._fake_post)) + self.useFixture(fixtures.MonkeyPatch( + 'nova.scheduler.client.report.SchedulerReportClient.put', + self._fake_put)) + self.useFixture(fixtures.MonkeyPatch( + 'nova.scheduler.client.report.SchedulerReportClient.delete', + self._fake_delete)) + + self.api = PlacementApiClient(self) + + @staticmethod + def _update_headers_with_version(headers, **kwargs): + version = kwargs.get("version") + if version is not None: + # TODO(mriedem): Perform some version discovery at some point. + headers.update({ + 'OpenStack-API-Version': 'placement %s' % version + }) + + def _fake_get(self, *args, **kwargs): + (url,) = args[1:] + # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 + # in case a token is not provided. We should change that by creating + # a fake token so we could remove adding the header below. 
+ headers = {'x-auth-token': self.token} + self._update_headers_with_version(headers, **kwargs) + return self._client.get( + url, + endpoint_override=self.endpoint, + headers=headers) + + def _fake_post(self, *args, **kwargs): + (url, data) = args[1:] + # NOTE(sdague): using json= instead of data= sets the + # media type to application/json for us. Placement API is + # more sensitive to this than other APIs in the OpenStack + # ecosystem. + # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 + # in case a token is not provided. We should change that by creating + # a fake token so we could remove adding the header below. + headers = {'x-auth-token': self.token} + self._update_headers_with_version(headers, **kwargs) + return self._client.post( + url, json=data, + endpoint_override=self.endpoint, + headers=headers) + + def _fake_put(self, *args, **kwargs): + (url, data) = args[1:] + # NOTE(sdague): using json= instead of data= sets the + # media type to application/json for us. Placement API is + # more sensitive to this than other APIs in the OpenStack + # ecosystem. + # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 + # in case a token is not provided. We should change that by creating + # a fake token so we could remove adding the header below. + headers = {'x-auth-token': self.token} + self._update_headers_with_version(headers, **kwargs) + return self._client.put( + url, json=data, + endpoint_override=self.endpoint, + headers=headers) + + def _fake_delete(self, *args, **kwargs): + (url,) = args[1:] + # TODO(sbauza): The current placement NoAuthMiddleware returns a 401 + # in case a token is not provided. We should change that by creating + # a fake token so we could remove adding the header below. + return self._client.delete( + url, + endpoint_override=self.endpoint, + headers={'x-auth-token': self.token}) diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py index e4c8f1840e84..d8b6e4ad6a7e 100644 --- a/nova/tests/functional/integrated_helpers.py +++ b/nova/tests/functional/integrated_helpers.py @@ -33,6 +33,7 @@ from nova import objects from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client as api_client +from nova.tests.functional import fixtures as func_fixtures from nova.tests.unit import cast_as_call from nova.tests.unit import fake_notifier import nova.tests.unit.image.fake @@ -86,8 +87,7 @@ class _IntegratedTestBase(test.TestCase): nova.tests.unit.image.fake.stub_out_image_service(self) self.useFixture(cast_as_call.CastAsCall(self)) - self.useFixture(nova_fixtures.Database(database='placement')) - placement = self.useFixture(nova_fixtures.PlacementFixture()) + placement = self.useFixture(func_fixtures.PlacementFixture()) self.placement_api = placement.api self._setup_services() @@ -373,7 +373,7 @@ class ProviderUsageBaseTestCase(test.TestCase, InstanceHelperMixin): fake_notifier.stub_notifier(self) self.addCleanup(fake_notifier.reset) - placement = self.useFixture(nova_fixtures.PlacementFixture()) + placement = self.useFixture(func_fixtures.PlacementFixture()) self.placement_api = placement.api api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py index 772853fe9309..e3a2893541fb 100644 --- a/nova/tests/functional/libvirt/base.py +++ b/nova/tests/functional/libvirt/base.py @@ -16,7 +16,7 @@ import fixtures import mock 
-from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import test_servers as base from nova.tests.unit.virt.libvirt import fake_imagebackend from nova.tests.unit.virt.libvirt import fake_libvirt_utils @@ -43,7 +43,7 @@ class ServersTestBase(base.ServersTestBase): 'nova.virt.libvirt.guest.libvirt', fakelibvirt)) self.useFixture(fakelibvirt.FakeLibvirtFixture()) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) # Mock the 'get_connection' function, as we're going to need to provide # custom capabilities for each test diff --git a/nova/tests/functional/notification_sample_tests/notification_sample_base.py b/nova/tests/functional/notification_sample_tests/notification_sample_base.py index 568428447c65..2600785dfa22 100644 --- a/nova/tests/functional/notification_sample_tests/notification_sample_base.py +++ b/nova/tests/functional/notification_sample_tests/notification_sample_base.py @@ -22,6 +22,7 @@ from oslo_utils import fixture as utils_fixture from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests import json_ref from nova.tests.unit.api.openstack.compute import test_services @@ -83,7 +84,7 @@ class NotificationSampleTestBase(test.TestCase, # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) context_patcher = self.mock_gen_request_id = mock.patch( 'oslo_context.context.generate_request_id', diff --git a/nova/tests/functional/regressions/test_bug_1595962.py b/nova/tests/functional/regressions/test_bug_1595962.py index 80c0a6a52e70..06004159b444 100644 --- a/nova/tests/functional/regressions/test_bug_1595962.py +++ b/nova/tests/functional/regressions/test_bug_1595962.py @@ -20,6 +20,7 @@ import mock import nova from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.unit import cast_as_call from nova.tests.unit import policy_fixture from nova.tests.unit.virt.libvirt import fake_libvirt_utils @@ -34,7 +35,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase): super(TestSerialConsoleLiveMigrate, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) # Replace libvirt with fakelibvirt diff --git a/nova/tests/functional/regressions/test_bug_1671648.py b/nova/tests/functional/regressions/test_bug_1671648.py index 2a2a6b6954e8..a09b2b4168ad 100644 --- a/nova/tests/functional/regressions/test_bug_1671648.py +++ b/nova/tests/functional/regressions/test_bug_1671648.py @@ -18,6 +18,7 @@ import nova.compute.resource_tracker from nova import exception from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.unit import cast_as_call from nova.tests.unit import fake_network import nova.tests.unit.image.fake @@ -50,7 +51,7 @@ class TestRetryBetweenComputeNodeBuilds(test.TestCase): # 
We need the computes reporting into placement for the filter # scheduler to pick a host. - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1675570.py b/nova/tests/functional/regressions/test_bug_1675570.py index f7c5c8cbd90b..8aade45f7cf4 100644 --- a/nova/tests/functional/regressions/test_bug_1675570.py +++ b/nova/tests/functional/regressions/test_bug_1675570.py @@ -21,6 +21,7 @@ from nova.compute import api as compute_api from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client +from nova.tests.functional import fixtures as func_fixtures from nova.tests.unit import cast_as_call import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture @@ -54,7 +55,7 @@ class TestLocalDeleteAttachedVolumes(test.TestCase): # The NeutronFixture is needed to stub out validate_networks in API. self.useFixture(nova_fixtures.NeutronFixture(self)) # Use the PlacementFixture to avoid annoying warnings in the logs. - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.api diff --git a/nova/tests/functional/regressions/test_bug_1679750.py b/nova/tests/functional/regressions/test_bug_1679750.py index 7968a036b183..8d5c55e30f52 100644 --- a/nova/tests/functional/regressions/test_bug_1679750.py +++ b/nova/tests/functional/regressions/test_bug_1679750.py @@ -10,8 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo_config import cfg +from oslo_config import fixture as config_fixture +from placement import conf as placement_conf +from placement.tests import fixtures as placement_db + from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture @@ -55,10 +61,17 @@ class TestLocalDeleteAllocations(test.TestCase, have been cleaned up once the nova-compute service restarts. In this scenario we conditionally use the PlacementFixture to simulate - the case that nova-api isn't configured to talk to placement. + the case that nova-api isn't configured to talk to placement, thus we + need to manage the placement database independently. """ + config = cfg.ConfigOpts() + placement_config = self.useFixture(config_fixture.Config(config)) + placement_conf.register_opts(config) + self.useFixture(placement_db.Database(placement_config, + set_config=True)) # Get allocations, make sure they are 0. - with nova_fixtures.PlacementFixture() as placement: + with func_fixtures.PlacementFixture( + conf_fixture=placement_config, db=False) as placement: compute = self.start_service('compute') placement_api = placement.api resp = placement_api.get('/resource_providers') @@ -89,7 +102,8 @@ class TestLocalDeleteAllocations(test.TestCase, self.api.delete_server(server['id']) self._wait_until_deleted(server) - with nova_fixtures.PlacementFixture() as placement: + with func_fixtures.PlacementFixture( + conf_fixture=placement_config, db=False) as placement: placement_api = placement.api # Assert usages are still non-zero. 
usages_during = self._get_usages(placement_api, rp_uuid) @@ -111,7 +125,7 @@ class TestLocalDeleteAllocations(test.TestCase, """Tests that the compute API deletes allocations when the compute service on which the instance was running is down. """ - placement_api = self.useFixture(nova_fixtures.PlacementFixture()).api + placement_api = self.useFixture(func_fixtures.PlacementFixture()).api compute = self.start_service('compute') # Get allocations, make sure they are 0. resp = placement_api.get('/resource_providers') diff --git a/nova/tests/functional/regressions/test_bug_1682693.py b/nova/tests/functional/regressions/test_bug_1682693.py index 485bab9bb2ca..7df7b113d2b6 100644 --- a/nova/tests/functional/regressions/test_bug_1682693.py +++ b/nova/tests/functional/regressions/test_bug_1682693.py @@ -14,6 +14,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as image_fake from nova.tests.unit import policy_fixture @@ -34,7 +35,7 @@ class ServerTagsFilteringTest(test.TestCase, # The NeutronFixture is needed to stub out validate_networks in API. self.useFixture(nova_fixtures.NeutronFixture(self)) # Use the PlacementFixture to avoid annoying warnings in the logs. - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) self.api = api_fixture.api diff --git a/nova/tests/functional/regressions/test_bug_1702454.py b/nova/tests/functional/regressions/test_bug_1702454.py index 9c7c0ee72948..c4d048a49861 100644 --- a/nova/tests/functional/regressions/test_bug_1702454.py +++ b/nova/tests/functional/regressions/test_bug_1702454.py @@ -13,6 +13,7 @@ from nova.scheduler import weights from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import cast_as_call from nova.tests.unit.image import fake as image_fake @@ -58,7 +59,7 @@ class SchedulerOnlyChecksTargetTest(test.TestCase, # We need the computes reporting into placement for the filter # scheduler to pick a host. 
- self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py index bc023e779c6b..164c9eb04edc 100644 --- a/nova/tests/functional/regressions/test_bug_1713783.py +++ b/nova/tests/functional/regressions/test_bug_1713783.py @@ -18,6 +18,7 @@ from oslo_log import log as logging from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network from nova.tests.unit import fake_notifier @@ -46,7 +47,7 @@ class FailedEvacuateStateTests(test.TestCase, self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1718455.py b/nova/tests/functional/regressions/test_bug_1718455.py index 6320e8732bf7..51306bd3d90a 100644 --- a/nova/tests/functional/regressions/test_bug_1718455.py +++ b/nova/tests/functional/regressions/test_bug_1718455.py @@ -14,6 +14,7 @@ import time from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network import nova.tests.unit.image.fake @@ -46,7 +47,7 @@ class TestLiveMigrateOneOfConcurrentlyCreatedInstances( self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1718512.py b/nova/tests/functional/regressions/test_bug_1718512.py index b96a69aec8d1..8c1799151b9a 100644 --- a/nova/tests/functional/regressions/test_bug_1718512.py +++ b/nova/tests/functional/regressions/test_bug_1718512.py @@ -16,6 +16,7 @@ from nova import objects from nova.scheduler import weights from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as image_fake from nova.tests.unit import policy_fixture @@ -50,7 +51,7 @@ class TestRequestSpecRetryReschedule(test.TestCase, # We need the computes reporting into placement for the filter # scheduler to pick a host. 
- self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1719730.py b/nova/tests/functional/regressions/test_bug_1719730.py index 625949148033..188e952f784c 100644 --- a/nova/tests/functional/regressions/test_bug_1719730.py +++ b/nova/tests/functional/regressions/test_bug_1719730.py @@ -13,6 +13,7 @@ from nova import exception from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network import nova.tests.unit.image.fake @@ -46,7 +47,7 @@ class TestRescheduleWithServerGroup(test.TestCase, # We need the computes reporting into placement for the filter # scheduler to pick a host. - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1735407.py b/nova/tests/functional/regressions/test_bug_1735407.py index c05f6e10195c..446254fa3614 100644 --- a/nova/tests/functional/regressions/test_bug_1735407.py +++ b/nova/tests/functional/regressions/test_bug_1735407.py @@ -12,6 +12,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network from nova.tests.unit import fake_notifier @@ -39,7 +40,7 @@ class TestParallelEvacuationWithServerGroup( # We need the computes reporting into placement for the filter # scheduler to pick a host. 
- self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1741307.py b/nova/tests/functional/regressions/test_bug_1741307.py index 811352b0de8d..c8824f23178e 100644 --- a/nova/tests/functional/regressions/test_bug_1741307.py +++ b/nova/tests/functional/regressions/test_bug_1741307.py @@ -12,6 +12,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture @@ -48,7 +49,7 @@ class TestResizeWithNoAllocationScheduler( self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1746483.py b/nova/tests/functional/regressions/test_bug_1746483.py index 64dc0c6731ee..939ee773bf5a 100644 --- a/nova/tests/functional/regressions/test_bug_1746483.py +++ b/nova/tests/functional/regressions/test_bug_1746483.py @@ -13,6 +13,7 @@ from nova import config from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as image_fakes from nova.tests.unit import policy_fixture @@ -40,7 +41,7 @@ class TestBootFromVolumeIsolatedHostsFilter( self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py index 8025a50c660c..1bbf58776f92 100644 --- a/nova/tests/functional/regressions/test_bug_1764883.py +++ b/nova/tests/functional/regressions/test_bug_1764883.py @@ -13,6 +13,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network from nova.tests.unit import fake_notifier @@ -43,7 +44,7 @@ class TestEvacuationWithSourceReturningDuringRebuild( # We need the computes reporting into placement for the filter # scheduler to pick a host. 
- self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1780373.py b/nova/tests/functional/regressions/test_bug_1780373.py index 1c54efde4a18..07abcd83917e 100644 --- a/nova/tests/functional/regressions/test_bug_1780373.py +++ b/nova/tests/functional/regressions/test_bug_1780373.py @@ -12,6 +12,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as fake_image from nova.tests.unit import policy_fixture @@ -40,7 +41,7 @@ class TestMultiCreateServerGroupMemberOverQuota( self.flags(server_group_members=2, group='quota') self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1781710.py b/nova/tests/functional/regressions/test_bug_1781710.py index ace4a6e8792f..595eb9e5442f 100644 --- a/nova/tests/functional/regressions/test_bug_1781710.py +++ b/nova/tests/functional/regressions/test_bug_1781710.py @@ -14,6 +14,7 @@ from nova.scheduler import filter_scheduler from nova.scheduler import weights from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as image_fake from nova.tests.unit import policy_fixture @@ -49,7 +50,7 @@ class AntiAffinityMultiCreateRequest(test.TestCase, super(AntiAffinityMultiCreateRequest, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1784353.py b/nova/tests/functional/regressions/test_bug_1784353.py index 837c31e33dfd..3bcb6e3b2ce1 100644 --- a/nova/tests/functional/regressions/test_bug_1784353.py +++ b/nova/tests/functional/regressions/test_bug_1784353.py @@ -12,6 +12,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import fake_network import nova.tests.unit.image.fake @@ -39,7 +40,7 @@ class TestRescheduleWithVolumesAttached( fake_network.set_stub_network_methods(self) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1797580.py b/nova/tests/functional/regressions/test_bug_1797580.py index 2af23f53d679..8a6cf173e122 100644 --- a/nova/tests/functional/regressions/test_bug_1797580.py +++ b/nova/tests/functional/regressions/test_bug_1797580.py @@ -12,6 +12,7 @@ from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import 
integrated_helpers from nova.tests.unit.image import fake as image_fake from nova.tests.unit import policy_fixture @@ -40,7 +41,7 @@ class ColdMigrateTargetHostThenLiveMigrateTest( super(ColdMigrateTargetHostThenLiveMigrateTest, self).setUp() self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/regressions/test_bug_1806064.py b/nova/tests/functional/regressions/test_bug_1806064.py index 8e9824713735..a16458f572d1 100644 --- a/nova/tests/functional/regressions/test_bug_1806064.py +++ b/nova/tests/functional/regressions/test_bug_1806064.py @@ -16,6 +16,7 @@ from nova import exception from nova import objects from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import policy_fixture @@ -50,7 +51,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest( # Use the standard fixtures. self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) self.api = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')).api # Use microversion 2.52 which allows creating a server with tags. diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py index 1fd84baf6e2b..ead5eacc58bb 100644 --- a/nova/tests/functional/test_aggregates.py +++ b/nova/tests/functional/test_aggregates.py @@ -21,6 +21,7 @@ from nova import context as nova_context from nova.scheduler import weights from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture @@ -71,7 +72,7 @@ class AggregateRequestFiltersTest(test.TestCase, self.useFixture(nova_fixtures.NeutronFixture(self)) self.useFixture(nova_fixtures.AllServicesCurrent()) - placement = self.useFixture(nova_fixtures.PlacementFixture()) + placement = self.useFixture(func_fixtures.PlacementFixture()) self.placement_api = placement.api api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) @@ -381,7 +382,7 @@ class TestAggregateMultiTenancyIsolationFilter( # Stub out glance, placement and neutron. nova.tests.unit.image.fake.stub_out_image_service(self) self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) # Start nova services. 
self.start_service('conductor') diff --git a/nova/tests/functional/test_list_servers_ip_filter.py b/nova/tests/functional/test_list_servers_ip_filter.py index 7c27534d0420..b52242f85e7f 100644 --- a/nova/tests/functional/test_list_servers_ip_filter.py +++ b/nova/tests/functional/test_list_servers_ip_filter.py @@ -18,6 +18,7 @@ import nova.scheduler.utils import nova.servicegroup from nova import test from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures from nova.tests.unit import cast_as_call import nova.tests.unit.image.fake from nova.tests.unit import policy_fixture @@ -39,7 +40,7 @@ class TestListServersIpFilter(test.TestCase): # the image fake backend needed for image discovery nova.tests.unit.image.fake.stub_out_image_service(self) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) self.start_service('conductor') self.flags(enabled_filters=['ComputeFilter'], diff --git a/nova/tests/functional/test_metadata.py b/nova/tests/functional/test_metadata.py index ccabcf6172c3..d1d664c07329 100644 --- a/nova/tests/functional/test_metadata.py +++ b/nova/tests/functional/test_metadata.py @@ -12,6 +12,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from __future__ import absolute_import import fixtures import requests diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py index 03c49292981d..4ee1164df982 100644 --- a/nova/tests/functional/test_nova_manage.py +++ b/nova/tests/functional/test_nova_manage.py @@ -9,6 +9,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +from __future__ import absolute_import import mock diff --git a/nova/tests/functional/test_nova_status.py b/nova/tests/functional/test_nova_status.py new file mode 100644 index 000000000000..69889fb9f02a --- /dev/null +++ b/nova/tests/functional/test_nova_status.py @@ -0,0 +1,289 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from __future__ import absolute_import + +import copy +import fixtures +from six.moves import StringIO + +from oslo_config import cfg +from oslo_config import fixture as config_fixture +from oslo_utils.fixture import uuidsentinel as uuids +from oslo_utils import uuidutils +import placement.db_api + +from nova.cmd import status +import nova.conf +from nova import context +# NOTE(mriedem): We only use objects as a convenience to populate the database +# in the tests, we don't use them in the actual CLI. +from nova import objects +from nova import rc_fields as fields +from nova import test +from nova.tests import fixtures as nova_fixtures +from nova.tests.functional import fixtures as func_fixtures + +CONF = nova.conf.CONF + +# This is what the ResourceTracker sets up in the nova-compute service. 
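+# Placement treats usable capacity as (total - reserved) * allocation_ratio,
+# so this inventory advertises (32 - 4) * 1.0 = 28 usable VCPUs.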
+FAKE_VCPU_INVENTORY = {
+    'resource_class': fields.ResourceClass.VCPU,
+    'total': 32,
+    'reserved': 4,
+    'min_unit': 1,
+    'max_unit': 1,
+    'step_size': 1,
+    'allocation_ratio': 1.0,
+}
+
+# This is the kind of thing that Neutron will set up externally for routed
+# networks.
+FAKE_IP_POOL_INVENTORY = {
+    'resource_class': fields.ResourceClass.IPV4_ADDRESS,
+    'total': 256,
+    'reserved': 10,
+    'min_unit': 1,
+    'max_unit': 1,
+    'step_size': 1,
+    'allocation_ratio': 1.0,
+}
+
+
+class TestUpgradeCheckResourceProviders(test.NoDBTestCase):
+    """Tests for the nova-status upgrade check on resource providers."""
+
+    # We'll set up the database ourselves because we need to use cells
+    # fixtures for multiple cell mappings.
+    USES_DB_SELF = True
+
+    def setUp(self):
+        super(TestUpgradeCheckResourceProviders, self).setUp()
+        self.output = StringIO()
+        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
+        # We always need the API DB to be set up.
+        self.useFixture(nova_fixtures.Database(database='api'))
+        # Setting up the placement fixtures is complex because we need
+        # the status command (which accesses the placement tables directly)
+        # to have access to the right database engine. So first we create
+        # a config, then the PlacementFixture, and then monkey patch the
+        # old placement get_placement_engine code.
+        config = cfg.ConfigOpts()
+        conf_fixture = self.useFixture(config_fixture.Config(config))
+        placement_fixture = self.useFixture(func_fixtures.PlacementFixture(
+            conf_fixture=conf_fixture, db=True))
+        self.placement_api = placement_fixture.api
+        # We need the code in status to be using the database we've set up.
+        self.useFixture(
+            fixtures.MonkeyPatch(
+                'nova.api.openstack.placement.db_api.get_placement_engine',
+                placement.db_api.get_placement_engine))
+        self.cmd = status.UpgradeCommands()
+
+    def test_check_resource_providers_fresh_install_no_mappings(self):
+        """Tests the scenario where we don't have any cell mappings (no cells
+        v2 setup yet) and no compute nodes in the single main database.
+        """
+        # We don't have a cell mapping, just the regular old main database
+        # because let's assume they haven't run simple_cell_setup yet.
+        self.useFixture(nova_fixtures.Database())
+        result = self.cmd._check_resource_providers()
+        # this is assumed to be a base install so it's OK but with details
+        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
+        self.assertIn('There are no compute resource providers in the '
+                      'Placement service nor are there compute nodes in the '
+                      'database',
+                      result.details)
+
+    def test_check_resource_providers_no_rps_no_computes_in_cell1(self):
+        """Tests the scenario where we have a cell mapping with no computes in
+        it and no resource providers (because of no computes).
+        """
+        # this will set up two cell mappings, one for cell0 and a single cell1
+        self._setup_cells()
+        # there are no compute nodes in the cell1 database so we have 0
+        # resource providers and 0 compute nodes, so it's assumed to be a
+        # fresh install and not a failure.
+        result = self.cmd._check_resource_providers()
+        # this is assumed to be a base install so it's OK but with details
+        self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code)
+        self.assertIn('There are no compute resource providers in the '
+                      'Placement service nor are there compute nodes in the '
+                      'database',
+                      result.details)
+
+    def test_check_resource_providers_no_rps_one_compute(self):
+        """Tests the scenario where we have compute nodes in the cell but no
+        resource providers yet - VCPU or otherwise.
+        This is a warning because the compute isn't reporting into
+        placement.
+        """
+        self._setup_cells()
+        # create a compute node which will be in cell1 by default
+        cn = objects.ComputeNode(
+            context=context.get_admin_context(),
+            host='fake-host',
+            vcpus=4,
+            memory_mb=8 * 1024,
+            local_gb=40,
+            vcpus_used=2,
+            memory_mb_used=2 * 1024,
+            local_gb_used=10,
+            hypervisor_type='fake',
+            hypervisor_version=1,
+            cpu_info='{"arch": "x86_64"}')
+        cn.create()
+        result = self.cmd._check_resource_providers()
+        self.assertEqual(status.UpgradeCheckCode.WARNING, result.code)
+        self.assertIn('There are no compute resource providers in the '
+                      'Placement service but there are 1 compute nodes in the '
+                      'deployment.', result.details)
+
+    def _create_resource_provider(self, inventory):
+        """Helper method to create a resource provider with inventory over
+        the placement HTTP API.
+        """
+        # We must copy the incoming inventory because it will be used in
+        # other tests in this module, and we pop from it below.
+        inventory = copy.copy(inventory)
+        rp_uuid = uuidutils.generate_uuid()
+        rp = {'name': rp_uuid, 'uuid': rp_uuid}
+        url = '/resource_providers'
+        resp = self.placement_api.post(url, body=rp)
+        self.assertTrue(resp.status < 400, resp.body)
+        res_class = inventory.pop('resource_class')
+        data = {'inventories': {res_class: inventory}}
+        data['resource_provider_generation'] = 0
+        url = '/resource_providers/%s/inventories' % rp_uuid
+        resp = self.placement_api.put(url, body=data)
+        self.assertTrue(resp.status < 400, resp.body)
+
+    def test_check_resource_providers_no_compute_rps_one_compute(self):
+        """Tests the scenario where we have compute nodes in the cell but no
+        compute (VCPU) resource providers yet. This is a warning because the
+        compute isn't reporting into placement.
+        """
+        self._setup_cells()
+        # create a compute node which will be in cell1 by default
+        cn = objects.ComputeNode(
+            context=context.get_admin_context(),
+            host='fake-host',
+            vcpus=4,
+            memory_mb=8 * 1024,
+            local_gb=40,
+            vcpus_used=2,
+            memory_mb_used=2 * 1024,
+            local_gb_used=10,
+            hypervisor_type='fake',
+            hypervisor_version=1,
+            cpu_info='{"arch": "x86_64"}')
+        cn.create()
+
+        # create a single resource provider that represents an external shared
+        # IP allocation pool - this tests our filtering when counting resource
+        # providers
+        self._create_resource_provider(FAKE_IP_POOL_INVENTORY)
+
+        result = self.cmd._check_resource_providers()
+        self.assertEqual(status.UpgradeCheckCode.WARNING, result.code)
+        self.assertIn('There are no compute resource providers in the '
+                      'Placement service but there are 1 compute nodes in the '
+                      'deployment.', result.details)
+
+    def test_check_resource_providers_fewer_rps_than_computes(self):
+        """Tests the scenario that we have fewer resource providers than
+        compute nodes, which is a warning because we're underutilized.
+ """ + # setup the cell0 and cell1 mappings + self._setup_cells() + + # create two compute nodes (by default in cell1) + ctxt = context.get_admin_context() + for x in range(2): + cn = objects.ComputeNode( + context=ctxt, + host=getattr(uuids, str(x)), + vcpus=4, + memory_mb=8 * 1024, + local_gb=40, + vcpus_used=2, + memory_mb_used=2 * 1024, + local_gb_used=10, + hypervisor_type='fake', + hypervisor_version=1, + cpu_info='{"arch": "x86_64"}') + cn.create() + + # create a single resource provider with some VCPU inventory + self._create_resource_provider(FAKE_VCPU_INVENTORY) + + result = self.cmd._check_resource_providers() + self.assertEqual(status.UpgradeCheckCode.WARNING, result.code) + self.assertIn('There are 1 compute resource providers and 2 compute ' + 'nodes in the deployment.', result.details) + + def test_check_resource_providers_equal_rps_to_computes(self): + """This tests the happy path scenario where we have an equal number + of compute resource providers to compute nodes. + """ + # setup the cell0 and cell1 mappings + self._setup_cells() + + # create a single compute node + ctxt = context.get_admin_context() + cn = objects.ComputeNode( + context=ctxt, + host=uuids.host, + vcpus=4, + memory_mb=8 * 1024, + local_gb=40, + vcpus_used=2, + memory_mb_used=2 * 1024, + local_gb_used=10, + hypervisor_type='fake', + hypervisor_version=1, + cpu_info='{"arch": "x86_64"}') + cn.create() + + # create a deleted compute node record (shouldn't count) + cn2 = objects.ComputeNode( + context=ctxt, + host='fakehost', + vcpus=4, + memory_mb=8 * 1024, + local_gb=40, + vcpus_used=2, + memory_mb_used=2 * 1024, + local_gb_used=10, + hypervisor_type='fake', + hypervisor_version=1, + cpu_info='{"arch": "x86_64"}') + cn2.create() + cn2.destroy() + + # create a single resource provider with some VCPU inventory + self._create_resource_provider(FAKE_VCPU_INVENTORY) + # create an externally shared IP allocation pool resource provider + self._create_resource_provider(FAKE_IP_POOL_INVENTORY) + + # Stub out _count_compute_nodes to make sure we never call it without + # a cell-targeted context. 
+ original_count_compute_nodes = ( + status.UpgradeCommands._count_compute_nodes) + + def stub_count_compute_nodes(_self, context=None): + self.assertIsNotNone(context.db_connection) + return original_count_compute_nodes(_self, context=context) + self.stub_out('nova.cmd.status.UpgradeCommands._count_compute_nodes', + stub_count_compute_nodes) + + result = self.cmd._check_resource_providers() + self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code) + self.assertIsNone(result.details) diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py index 3eaa88d54846..30ba681bec66 100644 --- a/nova/tests/functional/test_report_client.py +++ b/nova/tests/functional/test_report_client.py @@ -14,10 +14,13 @@ import copy from keystoneauth1 import exceptions as kse import mock +from oslo_config import cfg +from oslo_config import fixture as config_fixture from oslo_utils.fixture import uuidsentinel as uuids import pkg_resources +from placement import direct +from placement.tests import fixtures as placement_db -from nova.api.openstack.placement import direct from nova.cmd import status from nova.compute import provider_tree from nova import conf @@ -82,6 +85,14 @@ class VersionCheckingReportClient(report.SchedulerReportClient): class SchedulerReportClientTestBase(test.TestCase): + def setUp(self): + super(SchedulerReportClientTestBase, self).setUp() + # Because these tests use PlacementDirect we need to manage + # the database ourselves. + config = cfg.ConfigOpts() + placement_conf = self.useFixture(config_fixture.Config(config)) + self.useFixture(placement_db.Database(placement_conf, set_config=True)) + def _interceptor(self, app=None, latest_microversion=True): """Set up an intercepted placement API to test against. diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py index 359762e6a3c0..b8dadcb8d04b 100644 --- a/nova/tests/functional/test_server_group.py +++ b/nova/tests/functional/test_server_group.py @@ -25,6 +25,7 @@ from nova.db.sqlalchemy import api as db_api from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit import policy_fixture from nova import utils @@ -75,7 +76,7 @@ class ServerGroupTestBase(test.TestCase, self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) @@ -982,7 +983,7 @@ class TestAntiAffinityLiveMigration(test.TestCase, # Setup common fixtures. self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) # Setup API. 
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) diff --git a/nova/tests/functional/wsgi/test_servers.py b/nova/tests/functional/wsgi/test_servers.py index a753b05ad888..5cd5a3f191db 100644 --- a/nova/tests/functional/wsgi/test_servers.py +++ b/nova/tests/functional/wsgi/test_servers.py @@ -19,6 +19,7 @@ from nova.policies import servers as servers_policies from nova import test from nova.tests import fixtures as nova_fixtures from nova.tests.functional.api import client as api_client +from nova.tests.functional import fixtures as func_fixtures from nova.tests.functional import integrated_helpers from nova.tests.unit.image import fake as fake_image from nova.tests.unit import policy_fixture @@ -48,7 +49,7 @@ class ServersPreSchedulingTestCase(test.TestCase, self.useFixture(policy_fixture.RealPolicyFixture()) self.useFixture(nova_fixtures.NoopConductorFixture()) self.useFixture(nova_fixtures.NeutronFixture(self)) - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) api_fixture = self.useFixture(nova_fixtures.OSAPIFixture( api_version='v2.1')) @@ -408,7 +409,7 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase( # For this test, we want to start conductor and the scheduler but # we don't start compute so that scheduling fails; we don't really # care about successfully building an active server here. - self.useFixture(nova_fixtures.PlacementFixture()) + self.useFixture(func_fixtures.PlacementFixture()) self.useFixture(nova_fixtures.CinderFixture(self)) self.start_service('conductor') self.start_service('scheduler') diff --git a/nova/tests/unit/api/openstack/placement/__init__.py b/nova/tests/unit/api/openstack/placement/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/nova/tests/unit/api/openstack/placement/handlers/__init__.py b/nova/tests/unit/api/openstack/placement/handlers/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/nova/tests/unit/api/openstack/placement/handlers/test_aggregate.py b/nova/tests/unit/api/openstack/placement/handlers/test_aggregate.py deleted file mode 100644 index 64db6f6fda5b..000000000000 --- a/nova/tests/unit/api/openstack/placement/handlers/test_aggregate.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for code in the aggregate handler that gabbi isn't covering.""" - -import mock -import six -import testtools -import webob - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.handlers import aggregate -from nova.api.openstack.placement.objects import resource_provider - - -class TestAggregateHandlerErrors(testtools.TestCase): - """Tests that make sure errors hard to trigger by gabbi result in expected - exceptions. 
- """ - - def test_concurrent_exception_causes_409(self): - rp = resource_provider.ResourceProvider() - expected_message = ('Update conflict: Another thread concurrently ' - 'updated the data') - with mock.patch.object(rp, "set_aggregates", - side_effect=exception.ConcurrentUpdateDetected): - exc = self.assertRaises(webob.exc.HTTPConflict, - aggregate._set_aggregates, rp, []) - self.assertIn(expected_message, six.text_type(exc)) diff --git a/nova/tests/unit/api/openstack/placement/objects/__init__.py b/nova/tests/unit/api/openstack/placement/objects/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/nova/tests/unit/api/openstack/placement/objects/test_resource_provider.py b/nova/tests/unit/api/openstack/placement/objects/test_resource_provider.py deleted file mode 100644 index 1918fa243804..000000000000 --- a/nova/tests/unit/api/openstack/placement/objects/test_resource_provider.py +++ /dev/null @@ -1,330 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -from oslo_utils.fixture import uuidsentinel as uuids -from oslo_utils import timeutils -import six -import testtools - -from nova.api.openstack.placement import context -from nova.api.openstack.placement import exception -from nova.api.openstack.placement.objects import resource_provider -from nova import rc_fields as fields - - -_RESOURCE_CLASS_NAME = 'DISK_GB' -_RESOURCE_CLASS_ID = 2 -IPV4_ADDRESS_ID = fields.ResourceClass.STANDARD.index( - fields.ResourceClass.IPV4_ADDRESS) -VCPU_ID = fields.ResourceClass.STANDARD.index( - fields.ResourceClass.VCPU) - -_RESOURCE_PROVIDER_ID = 1 -_RESOURCE_PROVIDER_UUID = uuids.resource_provider -_RESOURCE_PROVIDER_NAME = six.text_type(uuids.resource_name) -_RESOURCE_PROVIDER_DB = { - 'id': _RESOURCE_PROVIDER_ID, - 'uuid': _RESOURCE_PROVIDER_UUID, - 'name': _RESOURCE_PROVIDER_NAME, - 'generation': 0, - 'root_provider_uuid': _RESOURCE_PROVIDER_UUID, - 'parent_provider_uuid': None, - 'updated_at': None, - 'created_at': timeutils.utcnow(with_timezone=True), -} - -_RESOURCE_PROVIDER_ID2 = 2 -_RESOURCE_PROVIDER_UUID2 = uuids.resource_provider2 -_RESOURCE_PROVIDER_NAME2 = uuids.resource_name2 -_RESOURCE_PROVIDER_DB2 = { - 'id': _RESOURCE_PROVIDER_ID2, - 'uuid': _RESOURCE_PROVIDER_UUID2, - 'name': _RESOURCE_PROVIDER_NAME2, - 'generation': 0, - 'root_provider_uuid': _RESOURCE_PROVIDER_UUID, - 'parent_provider_uuid': _RESOURCE_PROVIDER_UUID, -} - - -_INVENTORY_ID = 2 -_INVENTORY_DB = { - 'id': _INVENTORY_ID, - 'resource_provider_id': _RESOURCE_PROVIDER_ID, - 'resource_class_id': _RESOURCE_CLASS_ID, - 'total': 16, - 'reserved': 2, - 'min_unit': 1, - 'max_unit': 8, - 'step_size': 1, - 'allocation_ratio': 1.0, - 'updated_at': None, - 'created_at': timeutils.utcnow(with_timezone=True), -} -_ALLOCATION_ID = 2 -_ALLOCATION_DB = { - 'id': _ALLOCATION_ID, - 'resource_provider_id': _RESOURCE_PROVIDER_ID, - 'resource_class_id': _RESOURCE_CLASS_ID, - 'consumer_uuid': uuids.fake_instance, - 'consumer_id': 1, - 'consumer_generation': 0, - 'used': 8, - 'user_id': 1, - 
'user_external_id': uuids.user_id, - 'project_id': 1, - 'project_external_id': uuids.project_id, -} - - -def _fake_ensure_cache(ctxt): - cache = resource_provider._RC_CACHE = mock.MagicMock() - cache.string_from_id.return_value = _RESOURCE_CLASS_NAME - cache.id_from_string.return_value = _RESOURCE_CLASS_ID - - -class _TestCase(testtools.TestCase): - """Base class for other tests in this file. - - It establishes the RequestContext used as self.context in the tests. - """ - - def setUp(self): - super(_TestCase, self).setUp() - self.user_id = 'fake-user' - self.project_id = 'fake-project' - self.context = context.RequestContext(self.user_id, self.project_id) - - -class TestResourceProviderNoDB(_TestCase): - - def test_create_id_fail(self): - obj = resource_provider.ResourceProvider(context=self.context, - uuid=_RESOURCE_PROVIDER_UUID, - id=_RESOURCE_PROVIDER_ID) - self.assertRaises(exception.ObjectActionError, - obj.create) - - def test_create_no_uuid_fail(self): - obj = resource_provider.ResourceProvider(context=self.context) - self.assertRaises(exception.ObjectActionError, - obj.create) - - def test_create_with_root_provider_uuid_fail(self): - obj = resource_provider.ResourceProvider( - context=self.context, - uuid=_RESOURCE_PROVIDER_UUID, - name=_RESOURCE_PROVIDER_NAME, - root_provider_uuid=_RESOURCE_PROVIDER_UUID, - ) - - exc = self.assertRaises(exception.ObjectActionError, obj.create) - self.assertIn('root provider UUID cannot be manually set', str(exc)) - - def test_save_immutable(self): - fields = { - 'id': 1, - 'uuid': _RESOURCE_PROVIDER_UUID, - 'generation': 1, - 'root_provider_uuid': _RESOURCE_PROVIDER_UUID, - } - for field in fields: - rp = resource_provider.ResourceProvider(context=self.context) - setattr(rp, field, fields[field]) - self.assertRaises(exception.ObjectActionError, rp.save) - - -class TestProviderSummaryNoDB(_TestCase): - - def test_resource_class_names(self): - psum = resource_provider.ProviderSummary(mock.sentinel.ctx) - disk_psr = resource_provider.ProviderSummaryResource( - mock.sentinel.ctx, resource_class=fields.ResourceClass.DISK_GB, - capacity=100, used=0) - ram_psr = resource_provider.ProviderSummaryResource( - mock.sentinel.ctx, resource_class=fields.ResourceClass.MEMORY_MB, - capacity=1024, used=0) - psum.resources = [disk_psr, ram_psr] - expected = set(['DISK_GB', 'MEMORY_MB']) - self.assertEqual(expected, psum.resource_class_names) - - -class TestInventoryNoDB(_TestCase): - - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' - 'ensure_rc_cache', side_effect=_fake_ensure_cache) - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' - '_get_inventory_by_provider_id') - def test_get_all_by_resource_provider(self, mock_get, mock_ensure_cache): - mock_ensure_cache(self.context) - expected = [dict(_INVENTORY_DB, - resource_provider_id=_RESOURCE_PROVIDER_ID), - dict(_INVENTORY_DB, - id=_INVENTORY_DB['id'] + 1, - resource_provider_id=_RESOURCE_PROVIDER_ID)] - mock_get.return_value = expected - rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID, - uuid=_RESOURCE_PROVIDER_UUID) - objs = resource_provider.InventoryList.get_all_by_resource_provider( - self.context, rp) - self.assertEqual(2, len(objs)) - self.assertEqual(_INVENTORY_DB['id'], objs[0].id) - self.assertEqual(_INVENTORY_DB['id'] + 1, objs[1].id) - self.assertEqual(_RESOURCE_PROVIDER_ID, objs[0].resource_provider.id) - - def test_non_negative_handling(self): - # NOTE(cdent): Just checking, useless to be actually - # comprehensive here. 
- rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID, - uuid=_RESOURCE_PROVIDER_UUID) - kwargs = dict(resource_provider=rp, - resource_class=_RESOURCE_CLASS_NAME, - total=16, - reserved=2, - min_unit=1, - max_unit=-8, - step_size=1, - allocation_ratio=1.0) - self.assertRaises(ValueError, resource_provider.Inventory, - **kwargs) - - def test_set_defaults(self): - rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID, - uuid=_RESOURCE_PROVIDER_UUID) - kwargs = dict(resource_provider=rp, - resource_class=_RESOURCE_CLASS_NAME, - total=16) - inv = resource_provider.Inventory(self.context, **kwargs) - - inv.obj_set_defaults() - self.assertEqual(0, inv.reserved) - self.assertEqual(1, inv.min_unit) - self.assertEqual(1, inv.max_unit) - self.assertEqual(1, inv.step_size) - self.assertEqual(1.0, inv.allocation_ratio) - - def test_capacity(self): - rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID, - uuid=_RESOURCE_PROVIDER_UUID) - kwargs = dict(resource_provider=rp, - resource_class=_RESOURCE_CLASS_NAME, - total=16, - reserved=16) - inv = resource_provider.Inventory(self.context, **kwargs) - inv.obj_set_defaults() - - self.assertEqual(0, inv.capacity) - inv.reserved = 15 - self.assertEqual(1, inv.capacity) - inv.allocation_ratio = 2.0 - self.assertEqual(2, inv.capacity) - - -class TestInventoryList(_TestCase): - - def test_find(self): - rp = resource_provider.ResourceProvider(uuid=uuids.rp_uuid) - inv_list = resource_provider.InventoryList(objects=[ - resource_provider.Inventory( - resource_provider=rp, - resource_class=fields.ResourceClass.VCPU, - total=24), - resource_provider.Inventory( - resource_provider=rp, - resource_class=fields.ResourceClass.MEMORY_MB, - total=10240), - ]) - - found = inv_list.find(fields.ResourceClass.MEMORY_MB) - self.assertIsNotNone(found) - self.assertEqual(10240, found.total) - - found = inv_list.find(fields.ResourceClass.VCPU) - self.assertIsNotNone(found) - self.assertEqual(24, found.total) - - found = inv_list.find(fields.ResourceClass.DISK_GB) - self.assertIsNone(found) - - # Try an integer resource class identifier... - self.assertRaises(ValueError, inv_list.find, VCPU_ID) - - # Use an invalid string... - self.assertIsNone(inv_list.find('HOUSE')) - - -class TestAllocationListNoDB(_TestCase): - - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' - '_create_incomplete_consumers_for_provider') - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' - 'ensure_rc_cache', - side_effect=_fake_ensure_cache) - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' 
- '_get_allocations_by_provider_id', - return_value=[_ALLOCATION_DB]) - def test_get_allocations(self, mock_get_allocations_from_db, - mock_ensure_cache, mock_create_consumers): - mock_ensure_cache(self.context) - rp = resource_provider.ResourceProvider(id=_RESOURCE_PROVIDER_ID, - uuid=uuids.resource_provider) - rp_alloc_list = resource_provider.AllocationList - allocations = rp_alloc_list.get_all_by_resource_provider( - self.context, rp) - - self.assertEqual(1, len(allocations)) - mock_get_allocations_from_db.assert_called_once_with(self.context, - rp.id) - self.assertEqual(_ALLOCATION_DB['used'], allocations[0].used) - mock_create_consumers.assert_called_once_with( - self.context, _RESOURCE_PROVIDER_ID) - - -class TestResourceClass(_TestCase): - - def test_cannot_create_with_id(self): - rc = resource_provider.ResourceClass(self.context, id=1, - name='CUSTOM_IRON_NFV') - exc = self.assertRaises(exception.ObjectActionError, rc.create) - self.assertIn('already created', str(exc)) - - def test_cannot_create_requires_name(self): - rc = resource_provider.ResourceClass(self.context) - exc = self.assertRaises(exception.ObjectActionError, rc.create) - self.assertIn('name is required', str(exc)) - - -class TestTraits(_TestCase): - - @mock.patch("nova.api.openstack.placement.objects.resource_provider." - "_trait_sync") - def test_sync_flag(self, mock_sync): - synced = resource_provider._TRAITS_SYNCED - self.assertFalse(synced) - # Sync the traits - resource_provider.ensure_trait_sync(self.context) - synced = resource_provider._TRAITS_SYNCED - self.assertTrue(synced) - - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' - 'ResourceProvider.obj_reset_changes') - @mock.patch('nova.api.openstack.placement.objects.resource_provider.' - '_set_traits') - def test_set_traits_resets_changes(self, mock_set_traits, mock_reset): - trait = resource_provider.Trait(name="HW_CPU_X86_AVX2") - traits = resource_provider.TraitList(objects=[trait]) - - rp = resource_provider.ResourceProvider(self.context, name='cn1', - uuid=uuids.cn1) - rp.set_traits(traits) - mock_set_traits.assert_called_once_with(self.context, rp, traits) - mock_reset.assert_called_once_with() diff --git a/nova/tests/unit/api/openstack/placement/test_context.py b/nova/tests/unit/api/openstack/placement/test_context.py deleted file mode 100644 index 18101615919f..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_context.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
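TestTraits.test_sync_flag above depends on a run-once guard: a module-global flag that makes trait synchronization happen at most once per process. A minimal sketch of that pattern, assuming _trait_sync is the database-touching helper (it is the function mocked in the test; the real module also serializes callers with a lock):

    _TRAITS_SYNCED = False

    def ensure_trait_sync(ctx):
        """Sync the standard traits into the database at most once."""
        global _TRAITS_SYNCED
        if not _TRAITS_SYNCED:
            _trait_sync(ctx)  # assumed DB helper, mocked in the test above
            _TRAITS_SYNCED = True

A global flag keeps the common path cheap; the cost is that anything holding state across requests, tests included, must reset it explicitly.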
- -import mock -import testtools - -from nova.api.openstack.placement import context -from nova.api.openstack.placement import exception - - -class TestPlacementRequestContext(testtools.TestCase): - """Test cases for PlacementRequestContext.""" - - def setUp(self): - super(TestPlacementRequestContext, self).setUp() - self.ctxt = context.RequestContext(user_id='fake', project_id='fake') - self.default_target = {'user_id': self.ctxt.user_id, - 'project_id': self.ctxt.project_id} - - @mock.patch('nova.api.openstack.placement.policy.authorize', - return_value=True) - def test_can_target_none_fatal_true_accept(self, mock_authorize): - self.assertTrue(self.ctxt.can('placement:resource_providers:list')) - mock_authorize.assert_called_once_with( - self.ctxt, 'placement:resource_providers:list', - self.default_target) - - @mock.patch('nova.api.openstack.placement.policy.authorize', - side_effect=exception.PolicyNotAuthorized( - action='placement:resource_providers:list')) - def test_can_target_none_fatal_true_reject(self, mock_authorize): - self.assertRaises(exception.PolicyNotAuthorized, - self.ctxt.can, 'placement:resource_providers:list') - mock_authorize.assert_called_once_with( - self.ctxt, 'placement:resource_providers:list', - self.default_target) - - @mock.patch('nova.api.openstack.placement.policy.authorize', - side_effect=exception.PolicyNotAuthorized( - action='placement:resource_providers:list')) - def test_can_target_none_fatal_false_reject(self, mock_authorize): - self.assertFalse(self.ctxt.can('placement:resource_providers:list', - fatal=False)) - mock_authorize.assert_called_once_with( - self.ctxt, 'placement:resource_providers:list', - self.default_target) - - @mock.patch('nova.api.openstack.placement.policy.authorize', - return_value=True) - def test_can_target_none_fatal_true_accept_custom_target( - self, mock_authorize): - class MyObj(object): - user_id = project_id = 'fake2' - - target = MyObj() - self.assertTrue(self.ctxt.can('placement:resource_providers:list', - target=target)) - mock_authorize.assert_called_once_with( - self.ctxt, 'placement:resource_providers:list', target) diff --git a/nova/tests/unit/api/openstack/placement/test_db_api.py b/nova/tests/unit/api/openstack/placement/test_db_api.py deleted file mode 100644 index 0d247912f3cd..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_db_api.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
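The TestPlacementRequestContext cases above fix the contract of PlacementRequestContext.can: the target defaults to the context's own user and project, authorization is delegated to policy.authorize, and fatal=False converts an authorization failure into a False return instead of an exception. A minimal sketch of that contract, written as a free function for brevity (the real method lives on the context class):

    from nova.api.openstack.placement import exception
    from nova.api.openstack.placement import policy

    def can(ctxt, action, target=None, fatal=True):
        if target is None:
            # Default target: the context's own user and project.
            target = {'user_id': ctxt.user_id,
                      'project_id': ctxt.project_id}
        try:
            return policy.authorize(ctxt, action, target)
        except exception.PolicyNotAuthorized:
            if fatal:
                raise
            return False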
- - -import mock -import testtools - -from oslo_config import cfg -from oslo_config import fixture as config_fixture - -from nova.api.openstack.placement import db_api - - -CONF = cfg.CONF - - -class DbApiTests(testtools.TestCase): - - def setUp(self): - super(DbApiTests, self).setUp() - self.conf_fixture = self.useFixture(config_fixture.Config(CONF)) - db_api.configure.reset() - - @mock.patch.object(db_api.placement_context_manager, "configure") - def test_can_call_configure_twice(self, configure_mock): - """This test asserts that configure can be safely called twice - which may happen if placement is run under mod_wsgi and the - wsgi application is reloaded. - """ - db_api.configure(self.conf_fixture.conf) - configure_mock.assert_called_once() - - # a second invocation of configure on a transaction context - # should raise an exception, so mock this and assert it's not - # called on a second invocation of db_api's configure function - configure_mock.side_effect = TypeError() - - db_api.configure(self.conf_fixture.conf) - # Note we have not reset the mock, so it should - # have been called once from the first invocation of - # db_api.configure and the second invocation should not - # have called it again - configure_mock.assert_called_once() diff --git a/nova/tests/unit/api/openstack/placement/test_deploy.py b/nova/tests/unit/api/openstack/placement/test_deploy.py deleted file mode 100644 index 680d92cd187a..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_deploy.py +++ /dev/null @@ -1,47 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for the deploy function used to build the Placement service.""" - -from oslo_config import cfg -from oslo_policy import opts as policy_opts -import testtools -import webob - -from nova.api.openstack.placement import deploy - - -CONF = cfg.CONF - - -class DeployTest(testtools.TestCase): - - def test_auth_middleware_factory(self): - """Make sure that configuration settings make their way to - the keystone middleware correctly.
- """ - www_authenticate_uri = 'http://example.com/identity' - CONF.set_override('www_authenticate_uri', www_authenticate_uri, - group='keystone_authtoken') - # ensure that the auth_token middleware is chosen - CONF.set_override('auth_strategy', 'keystone', group='api') - # register and default policy opts (referenced by deploy) - policy_opts.set_defaults(CONF) - app = deploy.deploy(CONF) - req = webob.Request.blank('/resource_providers', method="GET") - - response = req.get_response(app) - - auth_header = response.headers['www-authenticate'] - self.assertIn(www_authenticate_uri, auth_header) - self.assertIn('keystone uri=', auth_header.lower()) diff --git a/nova/tests/unit/api/openstack/placement/test_fault_wrap.py b/nova/tests/unit/api/openstack/placement/test_fault_wrap.py deleted file mode 100644 index 6b794631ce2d..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_fault_wrap.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tests for the placement fault wrap middleware.""" - -import mock - -from oslo_serialization import jsonutils -import testtools -import webob - -from nova.api.openstack.placement import fault_wrap - - -ERROR_MESSAGE = 'that was not supposed to happen' - - -class Fault(Exception): - pass - - -class TestFaultWrapper(testtools.TestCase): - - @staticmethod - @webob.dec.wsgify - def failing_application(req): - raise Fault(ERROR_MESSAGE) - - def setUp(self): - super(TestFaultWrapper, self).setUp() - self.req = webob.Request.blank('/') - self.environ = self.req.environ - self.environ['HTTP_ACCEPT'] = 'application/json' - self.start_response_mock = mock.MagicMock() - self.fail_app = fault_wrap.FaultWrapper(self.failing_application) - - def test_fault_is_wrapped(self): - response = self.fail_app(self.environ, self.start_response_mock) - # response is a single member list - error_struct = jsonutils.loads(response[0]) - first_error = error_struct['errors'][0] - - self.assertIn(ERROR_MESSAGE, first_error['detail']) - self.assertEqual(500, first_error['status']) - self.assertEqual('Internal Server Error', first_error['title']) - - def test_fault_response_headers(self): - self.fail_app(self.environ, self.start_response_mock) - call_args = self.start_response_mock.call_args - self.assertEqual('500 Internal Server Error', call_args[0][0]) - - @mock.patch("nova.api.openstack.placement.fault_wrap.LOG") - def test_fault_log(self, mocked_log): - self.fail_app(self.environ, self.start_response_mock) - mocked_log.exception.assert_called_once_with( - 'Placement API unexpected error: %s', - mock.ANY) diff --git a/nova/tests/unit/api/openstack/placement/test_handler.py b/nova/tests/unit/api/openstack/placement/test_handler.py deleted file mode 100644 index 0574c7e4bbd2..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_handler.py +++ /dev/null @@ -1,194 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Unit tests for the functions used by the placement API handlers.""" - -import microversion_parse -import mock -from oslo_utils.fixture import uuidsentinel -import routes -import testtools -import webob - -from nova.api.openstack.placement import handler -from nova.api.openstack.placement.handlers import root -from nova.api.openstack.placement import microversion - - -# Used in tests below -def start_response(*args, **kwargs): - pass - - -def _environ(path='/moo', method='GET'): - return { - 'PATH_INFO': path, - 'REQUEST_METHOD': method, - 'SERVER_NAME': 'example.com', - 'SERVER_PORT': '80', - 'wsgi.url_scheme': 'http', - # The microversion version value is not used, but it - # needs to be set to avoid a KeyError. - microversion.MICROVERSION_ENVIRON: microversion_parse.Version(1, 12), - } - - -class DispatchTest(testtools.TestCase): - - def setUp(self): - super(DispatchTest, self).setUp() - self.mapper = routes.Mapper() - self.route_handler = mock.MagicMock() - - def test_no_match_null_map(self): - self.assertRaises(webob.exc.HTTPNotFound, - handler.dispatch, - _environ(), start_response, - self.mapper) - - def test_no_match_with_map(self): - self.mapper.connect('/foobar', action='hello') - self.assertRaises(webob.exc.HTTPNotFound, - handler.dispatch, - _environ(), start_response, - self.mapper) - - def test_simple_match(self): - self.mapper.connect('/foobar', action=self.route_handler, - conditions=dict(method=['GET'])) - environ = _environ(path='/foobar') - handler.dispatch(environ, start_response, self.mapper) - self.route_handler.assert_called_with(environ, start_response) - - def test_simple_match_routing_args(self): - self.mapper.connect('/foobar/{id}', action=self.route_handler, - conditions=dict(method=['GET'])) - environ = _environ(path='/foobar/%s' % uuidsentinel.foobar) - handler.dispatch(environ, start_response, self.mapper) - self.route_handler.assert_called_with(environ, start_response) - self.assertEqual(uuidsentinel.foobar, - environ['wsgiorg.routing_args'][1]['id']) - - -class MapperTest(testtools.TestCase): - - def setUp(self): - super(MapperTest, self).setUp() - declarations = { - '/hello': {'GET': 'hello'} - } - self.mapper = handler.make_map(declarations) - - def test_no_match(self): - environ = _environ(path='/cow') - self.assertIsNone(self.mapper.match(environ=environ)) - - def test_match(self): - environ = _environ(path='/hello') - action = self.mapper.match(environ=environ)['action'] - self.assertEqual('hello', action) - - def test_405_methods(self): - environ = _environ(path='/hello', method='POST') - result = self.mapper.match(environ=environ) - self.assertEqual(handler.handle_405, result['action']) - self.assertEqual('GET', result['_methods']) - - def test_405_headers(self): - environ = _environ(path='/hello', method='POST') - global headers, status - headers = status = None - - def local_start_response(*args, **kwargs): - global headers, status - status = args[0] - headers = {header[0]: header[1] for header in args[1]} - - handler.dispatch(environ, local_start_response, self.mapper) - allow_header = headers['allow'] - self.assertEqual('405 Method Not 
Allowed', status) - self.assertEqual('GET', allow_header) - # PEP 3333 requires that headers be whatever the native str - # is in that version of Python. Never unicode. - self.assertEqual(str, type(allow_header)) - - -class PlacementLoggingTest(testtools.TestCase): - - @mock.patch("nova.api.openstack.placement.handler.LOG") - def test_404_no_error_log(self, mocked_log): - environ = _environ(path='/hello', method='GET') - context_mock = mock.Mock() - context_mock.to_policy_values.return_value = {'roles': ['admin']} - environ['placement.context'] = context_mock - app = handler.PlacementHandler() - self.assertRaises(webob.exc.HTTPNotFound, - app, environ, start_response) - mocked_log.error.assert_not_called() - mocked_log.exception.assert_not_called() - - -class DeclarationsTest(testtools.TestCase): - - def setUp(self): - super(DeclarationsTest, self).setUp() - self.mapper = handler.make_map(handler.ROUTE_DECLARATIONS) - - def test_root_slash_match(self): - environ = _environ(path='/') - result = self.mapper.match(environ=environ) - self.assertEqual(root.home, result['action']) - - def test_root_empty_match(self): - environ = _environ(path='') - result = self.mapper.match(environ=environ) - self.assertEqual(root.home, result['action']) - - -class ContentHeadersTest(testtools.TestCase): - - def setUp(self): - super(ContentHeadersTest, self).setUp() - self.environ = _environ(path='/') - self.app = handler.PlacementHandler() - - def test_no_content_type(self): - self.environ['CONTENT_LENGTH'] = '10' - self.assertRaisesRegex(webob.exc.HTTPBadRequest, - "content-type header required when " - "content-length > 0", self.app, - self.environ, start_response) - - def test_non_integer_content_length(self): - self.environ['CONTENT_LENGTH'] = 'foo' - self.assertRaisesRegex(webob.exc.HTTPBadRequest, - "content-length header must be an integer", - self.app, self.environ, start_response) - - def test_empty_content_type(self): - self.environ['CONTENT_LENGTH'] = '10' - self.environ['CONTENT_TYPE'] = '' - self.assertRaisesRegex(webob.exc.HTTPBadRequest, - "content-type header required when " - "content-length > 0", self.app, - self.environ, start_response) - - def test_empty_content_length_and_type_works(self): - self.environ['CONTENT_LENGTH'] = '' - self.environ['CONTENT_TYPE'] = '' - self.app(self.environ, start_response) - - def test_content_length_and_type_works(self): - self.environ['CONTENT_LENGTH'] = '10' - self.environ['CONTENT_TYPE'] = 'foo' - self.app(self.environ, start_response) diff --git a/nova/tests/unit/api/openstack/placement/test_microversion.py b/nova/tests/unit/api/openstack/placement/test_microversion.py deleted file mode 100644 index 25e53a7f7c38..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_microversion.py +++ /dev/null @@ -1,153 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
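ContentHeadersTest above spells out the header validation the handler applies before dispatch: a content-length must parse as an integer, and a positive content-length requires a content-type. A sketch of those rules as a standalone helper (the helper name is hypothetical; the real handler performs equivalent checks inline):

    import webob.exc

    def check_content_headers(environ):
        length = environ.get('CONTENT_LENGTH', '')
        content_type = environ.get('CONTENT_TYPE', '')
        if length and not length.isdigit():
            raise webob.exc.HTTPBadRequest(
                'content-length header must be an integer')
        if length and int(length) > 0 and not content_type:
            raise webob.exc.HTTPBadRequest(
                'content-type header required when content-length > 0')

The _check_intersection helper in the test_microversion.py removal below is an endpoint sweep: each version range contributes +1 at its minimum and -1 at its maximum, and a running counter exceeding one means two ranges overlap. A self-contained illustration of the same idea:

    import operator

    def ranges_overlap(ranges):
        """Return True if any two (min, max) ranges intersect."""
        events = []
        for lo, hi in ranges:
            events.append((lo, 1))
            events.append((hi, -1))
        events.sort(key=operator.itemgetter(0))
        counter = 0
        for _, delta in events:
            counter += delta
            if counter > 1:
                return True
        return False

    # Mirrors the faked intersection/non-intersection cases below.
    assert ranges_overlap([((1, 0), (1, 9)), ((1, 8), (2, 0))])
    assert not ranges_overlap([((1, 0), (1, 8)), ((1, 9), (2, 0))])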
-"""Tests for placement microversion handling.""" - -import collections -import operator -import testtools -import webob - -import microversion_parse -import mock - -# import the handlers to load up handler decorators -import nova.api.openstack.placement.handler # noqa -from nova.api.openstack.placement import microversion - - -def handler(): - return True - - -class TestMicroversionFindMethod(testtools.TestCase): - def test_method_405(self): - self.assertRaises(webob.exc.HTTPMethodNotAllowed, - microversion._find_method, handler, '1.1', 405) - - def test_method_404(self): - self.assertRaises(webob.exc.HTTPNotFound, - microversion._find_method, handler, '1.1', 404) - - -class TestMicroversionDecoration(testtools.TestCase): - - @mock.patch('nova.api.openstack.placement.microversion.VERSIONED_METHODS', - new=collections.defaultdict(list)) - def test_methods_structure(self): - """Test that VERSIONED_METHODS gets data as expected.""" - self.assertEqual(0, len(microversion.VERSIONED_METHODS)) - fully_qualified_method = microversion._fully_qualified_name( - handler) - microversion.version_handler('1.1', '1.10')(handler) - microversion.version_handler('2.0')(handler) - - methods_data = microversion.VERSIONED_METHODS[fully_qualified_method] - - stored_method_data = methods_data[-1] - self.assertEqual(2, len(methods_data)) - self.assertEqual(microversion_parse.Version(1, 1), - stored_method_data[0]) - self.assertEqual(microversion_parse.Version(1, 10), - stored_method_data[1]) - self.assertEqual(handler, stored_method_data[2]) - self.assertEqual(microversion_parse.Version(2, 0), - methods_data[0][0]) - - def test_version_handler_float_exception(self): - self.assertRaises(TypeError, - microversion.version_handler(1.1), - handler) - - def test_version_handler_nan_exception(self): - self.assertRaises(TypeError, - microversion.version_handler('cow'), - handler) - - def test_version_handler_tuple_exception(self): - self.assertRaises(TypeError, - microversion.version_handler((1, 1)), - handler) - - -class TestMicroversionIntersection(testtools.TestCase): - """Test that there are no overlaps in the versioned handlers.""" - - # If you add versioned handlers you need to update this value to - # reflect the change. The value is the total number of methods - # with different names, not the total number overall. That is, - # if you add two different versions of method 'foobar' the - # number only goes up by one if no other version foobar yet - # exists. This operates as a simple sanity check. - TOTAL_VERSIONED_METHODS = 20 - - def test_methods_versioned(self): - methods_data = microversion.VERSIONED_METHODS - self.assertEqual(self.TOTAL_VERSIONED_METHODS, len(methods_data)) - - @staticmethod - def _check_intersection(method_info): - # See check_for_versions_intersection in - # nova.api.openstack.wsgi. 
- pairs = [] - counter = 0 - for min_ver, max_ver, func in method_info: - pairs.append((min_ver, 1, func)) - pairs.append((max_ver, -1, func)) - - pairs.sort(key=operator.itemgetter(0)) - - for p in pairs: - counter += p[1] - if counter > 1: - return True - return False - - @mock.patch('nova.api.openstack.placement.microversion.VERSIONED_METHODS', - new=collections.defaultdict(list)) - def test_faked_intersection(self): - microversion.version_handler('1.0', '1.9')(handler) - microversion.version_handler('1.8', '2.0')(handler) - - for method_info in microversion.VERSIONED_METHODS.values(): - self.assertTrue(self._check_intersection(method_info)) - - @mock.patch('nova.api.openstack.placement.microversion.VERSIONED_METHODS', - new=collections.defaultdict(list)) - def test_faked_non_intersection(self): - microversion.version_handler('1.0', '1.8')(handler) - microversion.version_handler('1.9', '2.0')(handler) - - for method_info in microversion.VERSIONED_METHODS.values(): - self.assertFalse(self._check_intersection(method_info)) - - def test_check_real_for_intersection(self): - """Check the real handlers to make sure there are no intersections.""" - for method_name, method_info in microversion.VERSIONED_METHODS.items(): - self.assertFalse( - self._check_intersection(method_info), - 'method %s has intersecting versioned handlers' % method_name) - - -class MicroversionSequentialTest(testtools.TestCase): - - def test_microversion_sequential(self): - for method_name, method_list in microversion.VERSIONED_METHODS.items(): - previous_min_version = method_list[0][0] - for method in method_list[1:]: - previous_min_version = microversion_parse.parse_version_string( - '%s.%s' % (previous_min_version.major, - previous_min_version.minor - 1)) - self.assertEqual(previous_min_version, method[1], - "The microversions aren't sequential in the method %s" % - method_name) - previous_min_version = method[0] diff --git a/nova/tests/unit/api/openstack/placement/test_policy.py b/nova/tests/unit/api/openstack/placement/test_policy.py deleted file mode 100644 index ad0bdf637a6a..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_policy.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import os - -from oslo_policy import policy as oslo_policy -import testtools - -from nova.api.openstack.placement import context -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import policy -from nova.tests.unit import conf_fixture -from nova.tests.unit import policy_fixture -from nova import utils - - -class PlacementPolicyTestCase(testtools.TestCase): - """Tests interactions with placement policy. - - These tests do not rely on the base nova.test.TestCase to avoid - interference from the PlacementPolicyFixture which is not used in all - test cases.
- """ - def setUp(self): - super(PlacementPolicyTestCase, self).setUp() - self.conf = self.useFixture(conf_fixture.ConfFixture()).conf - self.ctxt = context.RequestContext(user_id='fake', project_id='fake') - self.target = {'user_id': 'fake', 'project_id': 'fake'} - - def test_modified_policy_reloads(self): - """Creates a temporary placement-policy.yaml file and tests - authorizations against a fake rule between updates to the physical - policy file. - """ - with utils.tempdir() as tmpdir: - tmpfilename = os.path.join(tmpdir, 'placement-policy.yaml') - - self.conf.set_default( - 'policy_file', tmpfilename, group='placement') - - action = 'placement:test' - # Expect PolicyNotRegistered since defaults are not yet loaded. - self.assertRaises(oslo_policy.PolicyNotRegistered, - policy.authorize, self.ctxt, action, self.target) - - # Load the default action and rule (defaults to "any"). - enforcer = policy.get_enforcer() - rule = oslo_policy.RuleDefault(action, '') - enforcer.register_default(rule) - - # Now auth should work because the action is registered and anyone - # can perform the action. - policy.authorize(self.ctxt, action, self.target) - - # Now update the policy file and reload it to disable the action - # from all users. - with open(tmpfilename, "w") as policyfile: - policyfile.write('"%s": "!"' % action) - enforcer.load_rules(force_reload=True) - self.assertRaises(exception.PolicyNotAuthorized, policy.authorize, - self.ctxt, action, self.target) - - def test_authorize_do_raise_false(self): - """Tests that authorize does not raise an exception when the check - fails. - """ - fixture = self.useFixture(policy_fixture.PlacementPolicyFixture()) - fixture.set_rules({'placement': '!'}) - self.assertFalse( - policy.authorize( - self.ctxt, 'placement', self.target, do_raise=False)) diff --git a/nova/tests/unit/api/openstack/placement/test_requestlog.py b/nova/tests/unit/api/openstack/placement/test_requestlog.py deleted file mode 100644 index 9e28471873cb..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_requestlog.py +++ /dev/null @@ -1,72 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tests for the placement request log middleware.""" - -import mock -import testtools -import webob - -from nova.api.openstack.placement import requestlog - - -class TestRequestLog(testtools.TestCase): - - @staticmethod - @webob.dec.wsgify - def application(req): - req.response.status = 200 - return req.response - - def setUp(self): - super(TestRequestLog, self).setUp() - self.req = webob.Request.blank('/resource_providers?name=myrp') - self.environ = self.req.environ - # The blank does not include remote address, so add it. 
- self.environ['REMOTE_ADDR'] = '127.0.0.1' - # nor a microversion - self.environ['placement.microversion'] = '2.1' - - def test_get_uri(self): - req_uri = requestlog.RequestLog._get_uri(self.environ) - self.assertEqual('/resource_providers?name=myrp', req_uri) - - def test_get_uri_knows_prefix(self): - self.environ['SCRIPT_NAME'] = '/placement' - req_uri = requestlog.RequestLog._get_uri(self.environ) - self.assertEqual('/placement/resource_providers?name=myrp', req_uri) - - @mock.patch("nova.api.openstack.placement.requestlog.RequestLog.write_log") - def test_middleware_writes_logs(self, write_log): - start_response_mock = mock.MagicMock() - app = requestlog.RequestLog(self.application) - app(self.environ, start_response_mock) - write_log.assert_called_once_with( - self.environ, '/resource_providers?name=myrp', '200 OK', '0') - - @mock.patch("nova.api.openstack.placement.requestlog.LOG") - def test_middleware_sends_message(self, mocked_log): - start_response_mock = mock.MagicMock() - app = requestlog.RequestLog(self.application) - app(self.environ, start_response_mock) - mocked_log.debug.assert_called_once_with( - 'Starting request: %s "%s %s"', '127.0.0.1', 'GET', - '/resource_providers?name=myrp') - mocked_log.info.assert_called_once_with( - '%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" ' - 'status: %(status)s len: %(bytes)s microversion: %(microversion)s', - {'microversion': '2.1', - 'status': '200', - 'REQUEST_URI': '/resource_providers?name=myrp', - 'REQUEST_METHOD': 'GET', - 'REMOTE_ADDR': '127.0.0.1', - 'bytes': '0'}) diff --git a/nova/tests/unit/api/openstack/placement/test_util.py b/nova/tests/unit/api/openstack/placement/test_util.py deleted file mode 100644 index e9becece52d0..000000000000 --- a/nova/tests/unit/api/openstack/placement/test_util.py +++ /dev/null @@ -1,1119 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
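TestRequestLog above captures the observable shape of the request log middleware: URI reconstruction that honors the SCRIPT_NAME mount prefix, a DEBUG line when a request starts, and an INFO access line once the response status is known. A condensed sketch of that shape, not the verbatim implementation (the real middleware also logs the byte count and the negotiated microversion):

    import logging

    LOG = logging.getLogger(__name__)

    class RequestLog(object):
        def __init__(self, application):
            self.application = application

        @staticmethod
        def _get_uri(environ):
            # SCRIPT_NAME carries any mount prefix, e.g. '/placement'.
            uri = environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', '')
            if environ.get('QUERY_STRING'):
                uri += '?' + environ['QUERY_STRING']
            return uri

        def __call__(self, environ, start_response):
            uri = self._get_uri(environ)
            LOG.debug('Starting request: %s "%s %s"',
                      environ.get('REMOTE_ADDR'),
                      environ['REQUEST_METHOD'], uri)
            seen = {}

            def logging_start_response(status, headers, exc_info=None):
                # Capture the status line on its way to the server.
                seen['status'] = status
                return start_response(status, headers, exc_info)

            result = self.application(environ, logging_start_response)
            LOG.info('%s "%s %s" status: %s',
                     environ.get('REMOTE_ADDR'),
                     environ['REQUEST_METHOD'], uri, seen.get('status'))
            return result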
-"""Unit tests for the utility functions used by the placement API.""" - - -import datetime - -import fixtures -import microversion_parse -import mock -from oslo_config import cfg -from oslo_middleware import request_id -from oslo_utils.fixture import uuidsentinel -from oslo_utils import timeutils -import testtools -import webob - -import six - -from nova.api.openstack.placement import exception -from nova.api.openstack.placement import lib as pl -from nova.api.openstack.placement import microversion -from nova.api.openstack.placement.objects import consumer as consumer_obj -from nova.api.openstack.placement.objects import project as project_obj -from nova.api.openstack.placement.objects import resource_provider as rp_obj -from nova.api.openstack.placement.objects import user as user_obj -from nova.api.openstack.placement import util - - -CONF = cfg.CONF - - -class TestCheckAccept(testtools.TestCase): - """Confirm behavior of util.check_accept.""" - - @staticmethod - @util.check_accept('application/json', 'application/vnd.openstack') - def handler(req): - """Fake handler to test decorator.""" - return True - - def test_fail_no_match(self): - req = webob.Request.blank('/') - req.accept = 'text/plain' - - error = self.assertRaises(webob.exc.HTTPNotAcceptable, - self.handler, req) - self.assertEqual( - 'Only application/json, application/vnd.openstack is provided', - str(error)) - - def test_fail_complex_no_match(self): - req = webob.Request.blank('/') - req.accept = 'text/html;q=0.9,text/plain,application/vnd.aws;q=0.8' - - error = self.assertRaises(webob.exc.HTTPNotAcceptable, - self.handler, req) - self.assertEqual( - 'Only application/json, application/vnd.openstack is provided', - str(error)) - - def test_success_no_accept(self): - req = webob.Request.blank('/') - self.assertTrue(self.handler(req)) - - def test_success_simple_match(self): - req = webob.Request.blank('/') - req.accept = 'application/json' - self.assertTrue(self.handler(req)) - - def test_success_complex_any_match(self): - req = webob.Request.blank('/') - req.accept = 'application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' - self.assertTrue(self.handler(req)) - - def test_success_complex_lower_quality_match(self): - req = webob.Request.blank('/') - req.accept = 'application/xml;q=0.9,application/vnd.openstack;q=0.8' - self.assertTrue(self.handler(req)) - - -class TestExtractJSON(testtools.TestCase): - - # Although the intent of this test class is not to test that - # schemas work, we may as well use a real one to ensure that - # behaviors are what we expect. 
- schema = { - "type": "object", - "properties": { - "name": {"type": "string"}, - "uuid": {"type": "string", "format": "uuid"} - }, - "required": ["name"], - "additionalProperties": False - } - - def test_not_json(self): - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.extract_json, - 'I am a string', - self.schema) - self.assertIn('Malformed JSON', str(error)) - - def test_malformed_json(self): - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.extract_json, - '{"my bytes got left behind":}', - self.schema) - self.assertIn('Malformed JSON', str(error)) - - def test_schema_mismatch(self): - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.extract_json, - '{"a": "b"}', - self.schema) - self.assertIn('JSON does not validate', str(error)) - - def test_type_invalid(self): - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.extract_json, - '{"name": 1}', - self.schema) - self.assertIn('JSON does not validate', str(error)) - - def test_format_checker(self): - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.extract_json, - '{"name": "hello", "uuid": "not a uuid"}', - self.schema) - self.assertIn('JSON does not validate', str(error)) - - def test_no_additional_properties(self): - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.extract_json, - '{"name": "hello", "cow": "moo"}', - self.schema) - self.assertIn('JSON does not validate', str(error)) - - def test_valid(self): - data = util.extract_json( - '{"name": "cow", ' - '"uuid": "%s"}' % uuidsentinel.rp_uuid, - self.schema) - self.assertEqual('cow', data['name']) - self.assertEqual(uuidsentinel.rp_uuid, data['uuid']) - - -class QueryParamsSchemaTestCase(testtools.TestCase): - - def test_validate_request(self): - schema = { - 'type': 'object', - 'properties': { - 'foo': {'type': 'string'} - }, - 'additionalProperties': False} - req = webob.Request.blank('/test?foo=%88') - error = self.assertRaises(webob.exc.HTTPBadRequest, - util.validate_query_params, - req, schema) - self.assertIn('Invalid query string parameters', six.text_type(error)) - - -class TestJSONErrorFormatter(testtools.TestCase): - - def setUp(self): - super(TestJSONErrorFormatter, self).setUp() - self.environ = {} - # TODO(jaypipes): Remove this when we get more than a single version - # in the placement API. The fact that we only had a single version was - # masking a bug in the utils code. - _versions = [ - '1.0', - '1.1', - ] - mod_str = 'nova.api.openstack.placement.microversion.VERSIONS' - self.useFixture(fixtures.MonkeyPatch(mod_str, _versions)) - - def test_status_to_int_code(self): - body = '' - status = '404 Not Found' - title = '' - - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertEqual(404, result['errors'][0]['status']) - - def test_strip_body_tags(self): - body = '
<h1>Big Error!</h1>
' - status = '400 Bad Request' - title = '' - - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertEqual('Big Error!', result['errors'][0]['detail']) - - def test_request_id_presence(self): - body = '' - status = '400 Bad Request' - title = '' - - # no request id in environ, none in error - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertNotIn('request_id', result['errors'][0]) - - # request id in environ, request id in error - self.environ[request_id.ENV_REQUEST_ID] = 'stub-id' - - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertEqual('stub-id', result['errors'][0]['request_id']) - - def test_microversion_406_handling(self): - body = '' - status = '400 Bad Request' - title = '' - - # Not a 406, no version info required. - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertNotIn('max_version', result['errors'][0]) - self.assertNotIn('min_version', result['errors'][0]) - - # A 406 but not because of microversions (microversion - # parsing was successful), no version info - # required. - status = '406 Not Acceptable' - version_obj = microversion_parse.parse_version_string('2.3') - self.environ[microversion.MICROVERSION_ENVIRON] = version_obj - - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertNotIn('max_version', result['errors'][0]) - self.assertNotIn('min_version', result['errors'][0]) - - # Microversion parsing failed, status is 406, send version info. - del self.environ[microversion.MICROVERSION_ENVIRON] - - result = util.json_error_formatter( - body, status, title, self.environ) - self.assertEqual(microversion.max_version_string(), - result['errors'][0]['max_version']) - self.assertEqual(microversion.min_version_string(), - result['errors'][0]['min_version']) - - -class TestRequireContent(testtools.TestCase): - """Confirm behavior of util.require_accept.""" - - @staticmethod - @util.require_content('application/json') - def handler(req): - """Fake handler to test decorator.""" - return True - - def test_fail_no_content_type(self): - req = webob.Request.blank('/') - - error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType, - self.handler, req) - self.assertEqual( - 'The media type None is not supported, use application/json', - str(error)) - - def test_fail_wrong_content_type(self): - req = webob.Request.blank('/') - req.content_type = 'text/plain' - - error = self.assertRaises(webob.exc.HTTPUnsupportedMediaType, - self.handler, req) - self.assertEqual( - 'The media type text/plain is not supported, use application/json', - str(error)) - - def test_success_content_type(self): - req = webob.Request.blank('/') - req.content_type = 'application/json' - self.assertTrue(self.handler(req)) - - -class TestPlacementURLs(testtools.TestCase): - - def setUp(self): - super(TestPlacementURLs, self).setUp() - self.resource_provider = rp_obj.ResourceProvider( - name=uuidsentinel.rp_name, - uuid=uuidsentinel.rp_uuid) - self.resource_class = rp_obj.ResourceClass( - name='CUSTOM_BAREMETAL_GOLD', - id=1000) - - def test_resource_provider_url(self): - environ = {} - expected_url = '/resource_providers/%s' % uuidsentinel.rp_uuid - self.assertEqual(expected_url, util.resource_provider_url( - environ, self.resource_provider)) - - def test_resource_provider_url_prefix(self): - # SCRIPT_NAME represents the mount point of a WSGI - # application when it is hosted at a path/prefix. 
- environ = {'SCRIPT_NAME': '/placement'} - expected_url = ('/placement/resource_providers/%s' - % uuidsentinel.rp_uuid) - self.assertEqual(expected_url, util.resource_provider_url( - environ, self.resource_provider)) - - def test_inventories_url(self): - environ = {} - expected_url = ('/resource_providers/%s/inventories' - % uuidsentinel.rp_uuid) - self.assertEqual(expected_url, util.inventory_url( - environ, self.resource_provider)) - - def test_inventory_url(self): - resource_class = 'DISK_GB' - environ = {} - expected_url = ('/resource_providers/%s/inventories/%s' - % (uuidsentinel.rp_uuid, resource_class)) - self.assertEqual(expected_url, util.inventory_url( - environ, self.resource_provider, resource_class)) - - def test_resource_class_url(self): - environ = {} - expected_url = '/resource_classes/CUSTOM_BAREMETAL_GOLD' - self.assertEqual(expected_url, util.resource_class_url( - environ, self.resource_class)) - - def test_resource_class_url_prefix(self): - # SCRIPT_NAME represents the mount point of a WSGI - # application when it is hosted at a path/prefix. - environ = {'SCRIPT_NAME': '/placement'} - expected_url = '/placement/resource_classes/CUSTOM_BAREMETAL_GOLD' - self.assertEqual(expected_url, util.resource_class_url( - environ, self.resource_class)) - - -class TestNormalizeResourceQsParam(testtools.TestCase): - - def test_success(self): - qs = "VCPU:1" - resources = util.normalize_resources_qs_param(qs) - expected = { - 'VCPU': 1, - } - self.assertEqual(expected, resources) - - qs = "VCPU:1,MEMORY_MB:1024,DISK_GB:100" - resources = util.normalize_resources_qs_param(qs) - expected = { - 'VCPU': 1, - 'MEMORY_MB': 1024, - 'DISK_GB': 100, - } - self.assertEqual(expected, resources) - - def test_400_empty_string(self): - qs = "" - self.assertRaises( - webob.exc.HTTPBadRequest, - util.normalize_resources_qs_param, - qs, - ) - - def test_400_bad_int(self): - qs = "VCPU:foo" - self.assertRaises( - webob.exc.HTTPBadRequest, - util.normalize_resources_qs_param, - qs, - ) - - def test_400_no_amount(self): - qs = "VCPU" - self.assertRaises( - webob.exc.HTTPBadRequest, - util.normalize_resources_qs_param, - qs, - ) - - def test_400_zero_amount(self): - qs = "VCPU:0" - self.assertRaises( - webob.exc.HTTPBadRequest, - util.normalize_resources_qs_param, - qs, - ) - - -class TestNormalizeTraitsQsParam(testtools.TestCase): - - def test_one(self): - trait = 'HW_CPU_X86_VMX' - # Various whitespace permutations - for fmt in ('%s', ' %s', '%s ', ' %s ', ' %s '): - self.assertEqual(set([trait]), - util.normalize_traits_qs_param(fmt % trait)) - - def test_multiple(self): - traits = ( - 'HW_CPU_X86_VMX', - 'HW_GPU_API_DIRECT3D_V12_0', - 'HW_NIC_OFFLOAD_RX', - 'CUSTOM_GOLD', - 'STORAGE_DISK_SSD', - ) - self.assertEqual( - set(traits), - util.normalize_traits_qs_param('%s, %s,%s , %s , %s ' % traits)) - - def test_400_all_empty(self): - for qs in ('', ' ', ' ', ',', ' , , '): - self.assertRaises( - webob.exc.HTTPBadRequest, util.normalize_traits_qs_param, qs) - - def test_400_some_empty(self): - traits = ( - 'HW_NIC_OFFLOAD_RX', - 'CUSTOM_GOLD', - 'STORAGE_DISK_SSD', - ) - for fmt in ('%s,,%s,%s', ',%s,%s,%s', '%s,%s,%s,', ' %s , %s , , %s'): - self.assertRaises(webob.exc.HTTPBadRequest, - util.normalize_traits_qs_param, fmt % traits) - - -class TestParseQsRequestGroups(testtools.TestCase): - - @staticmethod - def do_parse(qstring, version=(1, 18)): - """Converts a querystring to a MultiDict, mimicking request.GET, and - runs parse_qs_request_groups on it. - """ - req = webob.Request.blank('?' 
+ qstring) - mv_parsed = microversion_parse.Version(*version) - mv_parsed.max_version = microversion_parse.parse_version_string( - microversion.max_version_string()) - mv_parsed.min_version = microversion_parse.parse_version_string( - microversion.min_version_string()) - req.environ['placement.microversion'] = mv_parsed - d = util.parse_qs_request_groups(req) - # Sort for easier testing - return [d[suff] for suff in sorted(d)] - - def assertRequestGroupsEqual(self, expected, observed): - self.assertEqual(len(expected), len(observed)) - for exp, obs in zip(expected, observed): - self.assertEqual(vars(exp), vars(obs)) - - def test_empty_raises(self): - # TODO(efried): Check the specific error code - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, '') - - def test_unnumbered_only(self): - """Unnumbered resources & traits - no numbered groupings.""" - qs = ('resources=VCPU:2,MEMORY_MB:2048' - '&required=HW_CPU_X86_VMX,CUSTOM_GOLD') - expected = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - }, - required_traits={ - 'HW_CPU_X86_VMX', - 'CUSTOM_GOLD', - }, - ), - ] - self.assertRequestGroupsEqual(expected, self.do_parse(qs)) - - def test_member_of_single_agg(self): - """Unnumbered resources with one member_of query param.""" - agg1_uuid = uuidsentinel.agg1 - qs = ('resources=VCPU:2,MEMORY_MB:2048' - '&member_of=%s' % agg1_uuid) - expected = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - }, - member_of=[ - set([agg1_uuid]) - ] - ), - ] - self.assertRequestGroupsEqual(expected, self.do_parse(qs)) - - def test_member_of_multiple_aggs_prior_microversion(self): - """Unnumbered resources with multiple member_of query params before the - supported microversion should raise a 400. - """ - agg1_uuid = uuidsentinel.agg1 - agg2_uuid = uuidsentinel.agg2 - qs = ('resources=VCPU:2,MEMORY_MB:2048' - '&member_of=%s' - '&member_of=%s' % (agg1_uuid, agg2_uuid)) - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - - def test_member_of_multiple_aggs(self): - """Unnumbered resources with multiple member_of query params.""" - agg1_uuid = uuidsentinel.agg1 - agg2_uuid = uuidsentinel.agg2 - qs = ('resources=VCPU:2,MEMORY_MB:2048' - '&member_of=%s' - '&member_of=%s' % (agg1_uuid, agg2_uuid)) - expected = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - }, - member_of=[ - set([agg1_uuid]), - set([agg2_uuid]) - ] - ), - ] - self.assertRequestGroupsEqual( - expected, self.do_parse(qs, version=(1, 24))) - - def test_unnumbered_resources_only(self): - """Validate the bit that can be used for 1.10 and earlier.""" - qs = 'resources=VCPU:2,MEMORY_MB:2048,DISK_GB:5,CUSTOM_MAGIC:123' - expected = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - 'DISK_GB': 5, - 'CUSTOM_MAGIC': 123, - }, - ), - ] - self.assertRequestGroupsEqual(expected, self.do_parse(qs)) - - def test_numbered_only(self): - # Crazy ordering and nonsequential numbers don't matter. - # It's okay to have a 'resources' without a 'required'. - # A trait that's repeated shows up in both spots. 
- qs = ('resources1=VCPU:2,MEMORY_MB:2048' - '&required42=CUSTOM_GOLD' - '&resources99=DISK_GB:5' - '&resources42=CUSTOM_MAGIC:123' - '&required1=HW_CPU_X86_VMX,CUSTOM_GOLD') - expected = [ - pl.RequestGroup( - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - }, - required_traits={ - 'HW_CPU_X86_VMX', - 'CUSTOM_GOLD', - }, - ), - pl.RequestGroup( - resources={ - 'CUSTOM_MAGIC': 123, - }, - required_traits={ - 'CUSTOM_GOLD', - }, - ), - pl.RequestGroup( - resources={ - 'DISK_GB': 5, - }, - ), - ] - self.assertRequestGroupsEqual(expected, self.do_parse(qs)) - - def test_numbered_and_unnumbered(self): - qs = ('resources=VCPU:3,MEMORY_MB:4096,DISK_GB:10' - '&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD' - '&resources1=SRIOV_NET_VF:2' - '&required1=CUSTOM_PHYSNET_PRIVATE' - '&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000' - ',NET_EGRESS_BYTES_SEC:10000' - '&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD' - '&resources3=CUSTOM_MAGIC:123') - expected = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 3, - 'MEMORY_MB': 4096, - 'DISK_GB': 10, - }, - required_traits={ - 'HW_CPU_X86_VMX', - 'CUSTOM_MEM_FLASH', - 'STORAGE_DISK_SSD', - }, - ), - pl.RequestGroup( - resources={ - 'SRIOV_NET_VF': 2, - }, - required_traits={ - 'CUSTOM_PHYSNET_PRIVATE', - }, - ), - pl.RequestGroup( - resources={ - 'SRIOV_NET_VF': 1, - 'NET_INGRESS_BYTES_SEC': 20000, - 'NET_EGRESS_BYTES_SEC': 10000, - }, - required_traits={ - 'CUSTOM_SWITCH_BIG', - 'CUSTOM_PHYSNET_PROD', - }, - ), - pl.RequestGroup( - resources={ - 'CUSTOM_MAGIC': 123, - }, - ), - ] - self.assertRequestGroupsEqual(expected, self.do_parse(qs)) - - def test_member_of_multiple_aggs_numbered(self): - """Numbered resources with multiple member_of query params.""" - agg1_uuid = uuidsentinel.agg1 - agg2_uuid = uuidsentinel.agg2 - agg3_uuid = uuidsentinel.agg3 - agg4_uuid = uuidsentinel.agg4 - qs = ('resources1=VCPU:2' - '&member_of1=%s' - '&member_of1=%s' - '&resources2=VCPU:2' - '&member_of2=in:%s,%s' % ( - agg1_uuid, agg2_uuid, agg3_uuid, agg4_uuid)) - expected = [ - pl.RequestGroup( - resources={ - 'VCPU': 2, - }, - member_of=[ - set([agg1_uuid]), - set([agg2_uuid]) - ] - ), - pl.RequestGroup( - resources={ - 'VCPU': 2, - }, - member_of=[ - set([agg3_uuid, agg4_uuid]), - ] - ), - ] - self.assertRequestGroupsEqual( - expected, self.do_parse(qs, version=(1, 24))) - - def test_400_malformed_resources(self): - # Somewhat duplicates TestNormalizeResourceQsParam.test_400*. - qs = ('resources=VCPU:0,MEMORY_MB:4096,DISK_GB:10' - # Bad ----------^ - '&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD' - '&resources1=SRIOV_NET_VF:2' - '&required1=CUSTOM_PHYSNET_PRIVATE' - '&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000' - ',NET_EGRESS_BYTES_SEC:10000' - '&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD' - '&resources3=CUSTOM_MAGIC:123') - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - - def test_400_malformed_traits(self): - # Somewhat duplicates TestNormalizeResourceQsParam.test_400*. 
- qs = ('resources=VCPU:7,MEMORY_MB:4096,DISK_GB:10' - '&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD' - '&resources1=SRIOV_NET_VF:2' - '&required1=CUSTOM_PHYSNET_PRIVATE' - '&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000' - ',NET_EGRESS_BYTES_SEC:10000' - '&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD,' - # Bad -------------------------------------------^ - '&resources3=CUSTOM_MAGIC:123') - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - - def test_400_traits_no_resources_unnumbered(self): - qs = ('resources9=VCPU:7,MEMORY_MB:4096,DISK_GB:10' - # Oops ---^ - '&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD' - '&resources1=SRIOV_NET_VF:2' - '&required1=CUSTOM_PHYSNET_PRIVATE' - '&resources2=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000' - ',NET_EGRESS_BYTES_SEC:10000' - '&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD' - '&resources3=CUSTOM_MAGIC:123') - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - - def test_400_traits_no_resources_numbered(self): - qs = ('resources=VCPU:7,MEMORY_MB:4096,DISK_GB:10' - '&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD' - '&resources11=SRIOV_NET_VF:2' - # Oops ----^^ - '&required1=CUSTOM_PHYSNET_PRIVATE' - '&resources20=SRIOV_NET_VF:1,NET_INGRESS_BYTES_SEC:20000' - # Oops ----^^ - ',NET_EGRESS_BYTES_SEC:10000' - '&required2=CUSTOM_SWITCH_BIG,CUSTOM_PHYSNET_PROD' - '&resources3=CUSTOM_MAGIC:123') - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - - def test_400_member_of_no_resources_numbered(self): - agg1_uuid = uuidsentinel.agg1 - qs = ('resources=VCPU:7,MEMORY_MB:4096,DISK_GB:10' - '&required=HW_CPU_X86_VMX,CUSTOM_MEM_FLASH,STORAGE_DISK_SSD' - '&member_of2=%s' % agg1_uuid) - self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - - def test_forbidden_one_group(self): - """When forbidden are allowed this will parse, but otherwise will - indicate an invalid trait. - """ - qs = ('resources=VCPU:2,MEMORY_MB:2048' - '&required=CUSTOM_PHYSNET1,!CUSTOM_SWITCH_BIG') - expected_forbidden = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - }, - required_traits={ - 'CUSTOM_PHYSNET1', - }, - forbidden_traits={ - 'CUSTOM_SWITCH_BIG', - } - ), - ] - expected_message = ( - "Invalid query string parameters: Expected 'required' parameter " - "value of the form: HW_CPU_X86_VMX,CUSTOM_MAGIC. 
Got: " - "CUSTOM_PHYSNET1,!CUSTOM_SWITCH_BIG") - exc = self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs) - self.assertEqual(expected_message, six.text_type(exc)) - self.assertRequestGroupsEqual( - expected_forbidden, self.do_parse(qs, version=(1, 22))) - - def test_forbidden_conflict(self): - qs = ('resources=VCPU:2,MEMORY_MB:2048' - '&required=CUSTOM_PHYSNET1,!CUSTOM_PHYSNET1') - - expected_message = ( - 'Conflicting required and forbidden traits found ' - 'in the following traits keys: required: (CUSTOM_PHYSNET1)') - - exc = self.assertRaises(webob.exc.HTTPBadRequest, self.do_parse, qs, - version=(1, 22)) - self.assertEqual(expected_message, six.text_type(exc)) - - def test_forbidden_two_groups(self): - qs = ('resources=VCPU:2,MEMORY_MB:2048&resources1=CUSTOM_MAGIC:1' - '&required1=CUSTOM_PHYSNET1,!CUSTOM_PHYSNET2') - expected = [ - pl.RequestGroup( - use_same_provider=False, - resources={ - 'VCPU': 2, - 'MEMORY_MB': 2048, - }, - ), - pl.RequestGroup( - resources={ - 'CUSTOM_MAGIC': 1, - }, - required_traits={ - 'CUSTOM_PHYSNET1', - }, - forbidden_traits={ - 'CUSTOM_PHYSNET2', - } - ), - ] - - self.assertRequestGroupsEqual( - expected, self.do_parse(qs, version=(1, 22))) - - def test_forbidden_separate_groups_no_conflict(self): - qs = ('resources1=CUSTOM_MAGIC:1&required1=CUSTOM_PHYSNET1' - '&resources2=CUSTOM_MAGIC:1&required2=!CUSTOM_PHYSNET1') - expected = [ - pl.RequestGroup( - use_same_provider=True, - resources={ - 'CUSTOM_MAGIC': 1, - }, - required_traits={ - 'CUSTOM_PHYSNET1', - } - ), - pl.RequestGroup( - use_same_provider=True, - resources={ - 'CUSTOM_MAGIC': 1, - }, - forbidden_traits={ - 'CUSTOM_PHYSNET1', - } - ), - ] - - self.assertRequestGroupsEqual( - expected, self.do_parse(qs, version=(1, 22))) - - -class TestPickLastModified(testtools.TestCase): - - def setUp(self): - super(TestPickLastModified, self).setUp() - self.resource_provider = rp_obj.ResourceProvider( - name=uuidsentinel.rp_name, uuid=uuidsentinel.rp_uuid) - - def test_updated_versus_none(self): - now = timeutils.utcnow(with_timezone=True) - self.resource_provider.updated_at = now - self.resource_provider.created_at = now - chosen_time = util.pick_last_modified(None, self.resource_provider) - self.assertEqual(now, chosen_time) - - def test_created_versus_none(self): - now = timeutils.utcnow(with_timezone=True) - self.resource_provider.created_at = now - self.resource_provider.updated_at = None - chosen_time = util.pick_last_modified(None, self.resource_provider) - self.assertEqual(now, chosen_time) - - def test_last_modified_less(self): - now = timeutils.utcnow(with_timezone=True) - less = now - datetime.timedelta(seconds=300) - self.resource_provider.updated_at = now - self.resource_provider.created_at = now - chosen_time = util.pick_last_modified(less, self.resource_provider) - self.assertEqual(now, chosen_time) - - def test_last_modified_more(self): - now = timeutils.utcnow(with_timezone=True) - more = now + datetime.timedelta(seconds=300) - self.resource_provider.updated_at = now - self.resource_provider.created_at = now - chosen_time = util.pick_last_modified(more, self.resource_provider) - self.assertEqual(more, chosen_time) - - def test_last_modified_same(self): - now = timeutils.utcnow(with_timezone=True) - self.resource_provider.updated_at = now - self.resource_provider.created_at = now - chosen_time = util.pick_last_modified(now, self.resource_provider) - self.assertEqual(now, chosen_time) - - def test_no_object_time_fields_less(self): - # An unsaved ovo will not have the created_at or 
updated_at fields - # present on the object at all. - now = timeutils.utcnow(with_timezone=True) - less = now - datetime.timedelta(seconds=300) - with mock.patch('oslo_utils.timeutils.utcnow') as mock_utc: - mock_utc.return_value = now - chosen_time = util.pick_last_modified( - less, self.resource_provider) - self.assertEqual(now, chosen_time) - mock_utc.assert_called_once_with(with_timezone=True) - - def test_no_object_time_fields_more(self): - # An unsaved ovo will not have the created_at or updated_at fields - # present on the object at all. - now = timeutils.utcnow(with_timezone=True) - more = now + datetime.timedelta(seconds=300) - with mock.patch('oslo_utils.timeutils.utcnow') as mock_utc: - mock_utc.return_value = now - chosen_time = util.pick_last_modified( - more, self.resource_provider) - self.assertEqual(more, chosen_time) - mock_utc.assert_called_once_with(with_timezone=True) - - def test_no_object_time_fields_none(self): - # An unsaved ovo will not have the created_at or updated_at fields - # present on the object at all. - now = timeutils.utcnow(with_timezone=True) - with mock.patch('oslo_utils.timeutils.utcnow') as mock_utc: - mock_utc.return_value = now - chosen_time = util.pick_last_modified( - None, self.resource_provider) - self.assertEqual(now, chosen_time) - mock_utc.assert_called_once_with(with_timezone=True) - - -class TestRequestGroup(testtools.TestCase): - def test_stringification(self): - grp = pl.RequestGroup( - resources={ - 'VCPU': 2, - 'CUSTOM_MAGIC': 1, - }, - required_traits={ - 'CUSTOM_VNIC_TYPE_NORMAL', - 'CUSTOM_PHYSNET1', - }, - forbidden_traits={ - 'CUSTOM_PHYSNET2', - 'CUSTOM_VNIC_TYPE_DIRECT' - }, - member_of=[ - ['baz'], - ['foo', 'bar'] - ] - ) - self.assertEqual( - 'RequestGroup(use_same_provider=True, ' - 'resources={CUSTOM_MAGIC:1, VCPU:2}, ' - 'traits=[CUSTOM_PHYSNET1, CUSTOM_VNIC_TYPE_NORMAL, ' - '!CUSTOM_PHYSNET2, !CUSTOM_VNIC_TYPE_DIRECT], ' - 'aggregates=[[baz], [foo, bar]])', - str(grp)) - - -class TestEnsureConsumer(testtools.TestCase): - def setUp(self): - super(TestEnsureConsumer, self).setUp() - self.mock_project_get = self.useFixture(fixtures.MockPatch( - 'nova.api.openstack.placement.objects.project.' - 'Project.get_by_external_id')).mock - self.mock_user_get = self.useFixture(fixtures.MockPatch( - 'nova.api.openstack.placement.objects.user.' - 'User.get_by_external_id')).mock - self.mock_consumer_get = self.useFixture(fixtures.MockPatch( - 'nova.api.openstack.placement.objects.consumer.' - 'Consumer.get_by_uuid')).mock - self.mock_project_create = self.useFixture(fixtures.MockPatch( - 'nova.api.openstack.placement.objects.project.' - 'Project.create')).mock - self.mock_user_create = self.useFixture(fixtures.MockPatch( - 'nova.api.openstack.placement.objects.user.' - 'User.create')).mock - self.mock_consumer_create = self.useFixture(fixtures.MockPatch( - 'nova.api.openstack.placement.objects.consumer.' 
-class TestEnsureConsumer(testtools.TestCase):
-    def setUp(self):
-        super(TestEnsureConsumer, self).setUp()
-        self.mock_project_get = self.useFixture(fixtures.MockPatch(
-            'nova.api.openstack.placement.objects.project.'
-            'Project.get_by_external_id')).mock
-        self.mock_user_get = self.useFixture(fixtures.MockPatch(
-            'nova.api.openstack.placement.objects.user.'
-            'User.get_by_external_id')).mock
-        self.mock_consumer_get = self.useFixture(fixtures.MockPatch(
-            'nova.api.openstack.placement.objects.consumer.'
-            'Consumer.get_by_uuid')).mock
-        self.mock_project_create = self.useFixture(fixtures.MockPatch(
-            'nova.api.openstack.placement.objects.project.'
-            'Project.create')).mock
-        self.mock_user_create = self.useFixture(fixtures.MockPatch(
-            'nova.api.openstack.placement.objects.user.'
-            'User.create')).mock
-        self.mock_consumer_create = self.useFixture(fixtures.MockPatch(
-            'nova.api.openstack.placement.objects.consumer.'
-            'Consumer.create')).mock
-        self.ctx = mock.sentinel.ctx
-        self.consumer_id = uuidsentinel.consumer
-        self.project_id = uuidsentinel.project
-        self.user_id = uuidsentinel.user
-        mv_parsed = microversion_parse.Version(1, 27)
-        mv_parsed.max_version = microversion_parse.parse_version_string(
-            microversion.max_version_string())
-        mv_parsed.min_version = microversion_parse.parse_version_string(
-            microversion.min_version_string())
-        self.before_version = mv_parsed
-        mv_parsed = microversion_parse.Version(1, 28)
-        mv_parsed.max_version = microversion_parse.parse_version_string(
-            microversion.max_version_string())
-        mv_parsed.min_version = microversion_parse.parse_version_string(
-            microversion.min_version_string())
-        self.after_version = mv_parsed
-
-    def test_no_existing_project_user_consumer_before_gen_success(self):
-        """Tests that we don't require a consumer_generation=None before the
-        appropriate microversion.
-        """
-        self.mock_project_get.side_effect = exception.NotFound
-        self.mock_user_get.side_effect = exception.NotFound
-        self.mock_consumer_get.side_effect = exception.NotFound
-
-        consumer_gen = 1  # should be ignored
-        util.ensure_consumer(
-            self.ctx, self.consumer_id, self.project_id, self.user_id,
-            consumer_gen, self.before_version)
-
-        self.mock_project_get.assert_called_once_with(
-            self.ctx, self.project_id)
-        self.mock_user_get.assert_called_once_with(
-            self.ctx, self.user_id)
-        self.mock_consumer_get.assert_called_once_with(
-            self.ctx, self.consumer_id)
-        self.mock_project_create.assert_called_once()
-        self.mock_user_create.assert_called_once()
-        self.mock_consumer_create.assert_called_once()
-
-    def test_no_existing_project_user_consumer_after_gen_success(self):
-        """Tests that we require a consumer_generation=None after the
-        appropriate microversion.
-        """
-        self.mock_project_get.side_effect = exception.NotFound
-        self.mock_user_get.side_effect = exception.NotFound
-        self.mock_consumer_get.side_effect = exception.NotFound
-
-        consumer_gen = None  # should NOT be ignored (and None is expected)
-        util.ensure_consumer(
-            self.ctx, self.consumer_id, self.project_id, self.user_id,
-            consumer_gen, self.after_version)
-
-        self.mock_project_get.assert_called_once_with(
-            self.ctx, self.project_id)
-        self.mock_user_get.assert_called_once_with(
-            self.ctx, self.user_id)
-        self.mock_consumer_get.assert_called_once_with(
-            self.ctx, self.consumer_id)
-        self.mock_project_create.assert_called_once()
-        self.mock_user_create.assert_called_once()
-        self.mock_consumer_create.assert_called_once()
-
-    def test_no_existing_project_user_consumer_after_gen_fail(self):
-        """Tests that we require a consumer_generation=None after the
-        appropriate microversion and that None is the expected value.
-        """
-        self.mock_project_get.side_effect = exception.NotFound
-        self.mock_user_get.side_effect = exception.NotFound
-        self.mock_consumer_get.side_effect = exception.NotFound
-
-        consumer_gen = 1  # should NOT be ignored (and 1 is not expected)
-        self.assertRaises(
-            webob.exc.HTTPConflict,
-            util.ensure_consumer,
-            self.ctx, self.consumer_id, self.project_id, self.user_id,
-            consumer_gen, self.after_version)
-
-    def test_no_existing_project_user_consumer_use_incomplete(self):
-        """Verify that if the project_id arg is None, we fall back to the
-        CONF options for incomplete project and user ID.
- """ - self.mock_project_get.side_effect = exception.NotFound - self.mock_user_get.side_effect = exception.NotFound - self.mock_consumer_get.side_effect = exception.NotFound - - consumer_gen = None # should NOT be ignored (and None is expected) - util.ensure_consumer( - self.ctx, self.consumer_id, None, None, - consumer_gen, self.before_version) - - self.mock_project_get.assert_called_once_with( - self.ctx, CONF.placement.incomplete_consumer_project_id) - self.mock_user_get.assert_called_once_with( - self.ctx, CONF.placement.incomplete_consumer_user_id) - self.mock_consumer_get.assert_called_once_with( - self.ctx, self.consumer_id) - self.mock_project_create.assert_called_once() - self.mock_user_create.assert_called_once() - self.mock_consumer_create.assert_called_once() - - def test_existing_project_no_existing_consumer_before_gen_success(self): - """Check that if we find an existing project and user, that we use - those found objects in creating the consumer. Do not require a consumer - generation before the appropriate microversion. - """ - proj = project_obj.Project(self.ctx, id=1, external_id=self.project_id) - self.mock_project_get.return_value = proj - user = user_obj.User(self.ctx, id=1, external_id=self.user_id) - self.mock_user_get.return_value = user - self.mock_consumer_get.side_effect = exception.NotFound - - consumer_gen = None # should be ignored - util.ensure_consumer( - self.ctx, self.consumer_id, self.project_id, self.user_id, - consumer_gen, self.before_version) - - self.mock_project_create.assert_not_called() - self.mock_user_create.assert_not_called() - self.mock_consumer_create.assert_called_once() - - def test_existing_consumer_after_gen_matches_supplied_gen(self): - """Tests that we require a consumer_generation after the - appropriate microversion and that when the consumer already exists, - then we ensure a matching generation is supplied - """ - proj = project_obj.Project(self.ctx, id=1, external_id=self.project_id) - self.mock_project_get.return_value = proj - user = user_obj.User(self.ctx, id=1, external_id=self.user_id) - self.mock_user_get.return_value = user - consumer = consumer_obj.Consumer( - self.ctx, id=1, project=proj, user=user, generation=2) - self.mock_consumer_get.return_value = consumer - - consumer_gen = 2 # should NOT be ignored (and 2 is expected) - util.ensure_consumer( - self.ctx, self.consumer_id, self.project_id, self.user_id, - consumer_gen, self.after_version) - - self.mock_project_create.assert_not_called() - self.mock_user_create.assert_not_called() - self.mock_consumer_create.assert_not_called() - - def test_existing_consumer_after_gen_fail(self): - """Tests that we require a consumer_generation after the - appropriate microversion and that when the consumer already exists, - then we raise a 400 when there is a mismatch on the existing - generation. 
- """ - proj = project_obj.Project(self.ctx, id=1, external_id=self.project_id) - self.mock_project_get.return_value = proj - user = user_obj.User(self.ctx, id=1, external_id=self.user_id) - self.mock_user_get.return_value = user - consumer = consumer_obj.Consumer( - self.ctx, id=1, project=proj, user=user, generation=42) - self.mock_consumer_get.return_value = consumer - - consumer_gen = 2 # should NOT be ignored (and 2 is NOT expected) - self.assertRaises( - webob.exc.HTTPConflict, - util.ensure_consumer, - self.ctx, self.consumer_id, self.project_id, self.user_id, - consumer_gen, self.after_version) diff --git a/nova/tests/unit/api_samples_test_base/__init__.py b/nova/tests/unit/api_samples_test_base/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py index 95903881f8cc..c263a4fdf4dc 100644 --- a/nova/tests/unit/cmd/test_status.py +++ b/nova/tests/unit/cmd/test_status.py @@ -16,6 +16,10 @@ Unit tests for the nova-status CLI interfaces. """ +# NOTE(cdent): Additional tests of nova-status may be found in +# nova/tests/functional/test_nova_status.py. Those tests use the external +# PlacementFixture, which is only available in functioanl tests. + import fixtures import mock from six.moves import StringIO @@ -27,7 +31,6 @@ from oslo_utils.fixture import uuidsentinel as uuids from oslo_utils import uuidutils from requests import models -from nova.api.openstack.placement.objects import resource_provider as rp_obj from nova.cmd import status import nova.conf from nova import context @@ -35,7 +38,6 @@ from nova import context # in the tests, we don't use them in the actual CLI. from nova import objects from nova.objects import request_spec as reqspec_obj -from nova import rc_fields as fields from nova import test from nova.tests import fixtures as nova_fixtures @@ -448,246 +450,6 @@ class TestUpgradeCheckCellsV2(test.NoDBTestCase): self.assertIsNone(result.details) -# This is what the ResourceTracker sets up in the nova-compute service. -FAKE_VCPU_INVENTORY = { - 'resource_class': fields.ResourceClass.VCPU, - 'total': 32, - 'reserved': 4, - 'min_unit': 1, - 'max_unit': 1, - 'step_size': 1, - 'allocation_ratio': 1.0, -} - -# This is the kind of thing that Neutron will setup externally for routed -# networks. -FAKE_IP_POOL_INVENTORY = { - 'resource_class': fields.ResourceClass.IPV4_ADDRESS, - 'total': 256, - 'reserved': 10, - 'min_unit': 1, - 'max_unit': 1, - 'step_size': 1, - 'allocation_ratio': 1.0, -} - - -class TestUpgradeCheckResourceProviders(test.NoDBTestCase): - """Tests for the nova-status upgrade check on resource providers.""" - - # We'll setup the database ourselves because we need to use cells fixtures - # for multiple cell mappings. - USES_DB_SELF = True - - def setUp(self): - super(TestUpgradeCheckResourceProviders, self).setUp() - self.output = StringIO() - self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output)) - # We always need the API DB to be setup. - self.useFixture(nova_fixtures.Database(database='api')) - self.useFixture(nova_fixtures.Database(database='placement')) - self.cmd = status.UpgradeCommands() - rp_obj.ensure_rc_cache(context.get_admin_context()) - - def test_check_resource_providers_fresh_install_no_mappings(self): - """Tests the scenario where we don't have any cell mappings (no cells - v2 setup yet) and no compute nodes in the single main database. 
- """ - # We don't have a cell mapping, just the regular old main database - # because let's assume they haven't run simple_cell_setup yet. - self.useFixture(nova_fixtures.Database()) - result = self.cmd._check_resource_providers() - # this is assumed to be base install so it's OK but with details - self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code) - self.assertIn('There are no compute resource providers in the ' - 'Placement service nor are there compute nodes in the ' - 'database', - result.details) - - def test_check_resource_providers_no_rps_no_computes_in_cell1(self): - """Tests the scenario where we have a cell mapping with no computes in - it and no resource providers (because of no computes). - """ - # this will setup two cell mappings, one for cell0 and a single cell1 - self._setup_cells() - # there are no compute nodes in the cell1 database so we have 0 - # resource providers and 0 compute nodes, so it's assumed to be a fresh - # install and not a failure. - result = self.cmd._check_resource_providers() - # this is assumed to be base install so it's OK but with details - self.assertEqual(status.UpgradeCheckCode.SUCCESS, result.code) - self.assertIn('There are no compute resource providers in the ' - 'Placement service nor are there compute nodes in the ' - 'database', - result.details) - - def test_check_resource_providers_no_rps_one_compute(self): - """Tests the scenario where we have compute nodes in the cell but no - resource providers yet - VCPU or otherwise. This is a warning because - the compute isn't reporting into placement. - """ - self._setup_cells() - # create a compute node which will be in cell1 by default - cn = objects.ComputeNode( - context=context.get_admin_context(), - host='fake-host', - vcpus=4, - memory_mb=8 * 1024, - local_gb=40, - vcpus_used=2, - memory_mb_used=2 * 1024, - local_gb_used=10, - hypervisor_type='fake', - hypervisor_version=1, - cpu_info='{"arch": "x86_64"}') - cn.create() - result = self.cmd._check_resource_providers() - self.assertEqual(status.UpgradeCheckCode.WARNING, result.code) - self.assertIn('There are no compute resource providers in the ' - 'Placement service but there are 1 compute nodes in the ' - 'deployment.', result.details) - - def _create_resource_provider(self, inventory): - """Helper method to create a resource provider with inventory""" - ctxt = context.get_admin_context() - rp_uuid = uuidutils.generate_uuid() - rp = rp_obj.ResourceProvider( - context=ctxt, - name=rp_uuid, - uuid=rp_uuid) - rp.create() - inv = rp_obj.Inventory( - context=ctxt, - resource_provider=rp, - **inventory) - inv_list = rp_obj.InventoryList(objects=[inv]) - rp.set_inventory(inv_list) - return rp - - def test_check_resource_providers_no_compute_rps_one_compute(self): - """Tests the scenario where we have compute nodes in the cell but no - compute (VCPU) resource providers yet. This is a failure warning the - compute isn't reporting into placement. 
- """ - self._setup_cells() - # create a compute node which will be in cell1 by default - cn = objects.ComputeNode( - context=context.get_admin_context(), - host='fake-host', - vcpus=4, - memory_mb=8 * 1024, - local_gb=40, - vcpus_used=2, - memory_mb_used=2 * 1024, - local_gb_used=10, - hypervisor_type='fake', - hypervisor_version=1, - cpu_info='{"arch": "x86_64"}') - cn.create() - - # create a single resource provider that represents an external shared - # IP allocation pool - this tests our filtering when counting resource - # providers - self._create_resource_provider(FAKE_IP_POOL_INVENTORY) - - result = self.cmd._check_resource_providers() - self.assertEqual(status.UpgradeCheckCode.WARNING, result.code) - self.assertIn('There are no compute resource providers in the ' - 'Placement service but there are 1 compute nodes in the ' - 'deployment.', result.details) - - def test_check_resource_providers_fewer_rps_than_computes(self): - """Tests the scenario that we have fewer resource providers than - compute nodes which is a warning because we're underutilized. - """ - # setup the cell0 and cell1 mappings - self._setup_cells() - - # create two compute nodes (by default in cell1) - ctxt = context.get_admin_context() - for x in range(2): - cn = objects.ComputeNode( - context=ctxt, - host=getattr(uuids, str(x)), - vcpus=4, - memory_mb=8 * 1024, - local_gb=40, - vcpus_used=2, - memory_mb_used=2 * 1024, - local_gb_used=10, - hypervisor_type='fake', - hypervisor_version=1, - cpu_info='{"arch": "x86_64"}') - cn.create() - - # create a single resource provider with some VCPU inventory - self._create_resource_provider(FAKE_VCPU_INVENTORY) - - result = self.cmd._check_resource_providers() - self.assertEqual(status.UpgradeCheckCode.WARNING, result.code) - self.assertIn('There are 1 compute resource providers and 2 compute ' - 'nodes in the deployment.', result.details) - - def test_check_resource_providers_equal_rps_to_computes(self): - """This tests the happy path scenario where we have an equal number - of compute resource providers to compute nodes. - """ - # setup the cell0 and cell1 mappings - self._setup_cells() - - # create a single compute node - ctxt = context.get_admin_context() - cn = objects.ComputeNode( - context=ctxt, - host=uuids.host, - vcpus=4, - memory_mb=8 * 1024, - local_gb=40, - vcpus_used=2, - memory_mb_used=2 * 1024, - local_gb_used=10, - hypervisor_type='fake', - hypervisor_version=1, - cpu_info='{"arch": "x86_64"}') - cn.create() - - # create a deleted compute node record (shouldn't count) - cn2 = objects.ComputeNode( - context=ctxt, - host='fakehost', - vcpus=4, - memory_mb=8 * 1024, - local_gb=40, - vcpus_used=2, - memory_mb_used=2 * 1024, - local_gb_used=10, - hypervisor_type='fake', - hypervisor_version=1, - cpu_info='{"arch": "x86_64"}') - cn2.create() - cn2.destroy() - - # create a single resource provider with some VCPU inventory - self._create_resource_provider(FAKE_VCPU_INVENTORY) - # create an externally shared IP allocation pool resource provider - self._create_resource_provider(FAKE_IP_POOL_INVENTORY) - - # Stub out _count_compute_nodes to make sure we never call it without - # a cell-targeted context. 
 class TestUpgradeCheckIronicFlavorMigration(test.NoDBTestCase):
     """Tests for the nova-status upgrade check on ironic flavor migration."""
 
diff --git a/nova/tests/unit/test_fixtures.py b/nova/tests/unit/test_fixtures.py
index 6870a305982b..3fff3763d686 100644
--- a/nova/tests/unit/test_fixtures.py
+++ b/nova/tests/unit/test_fixtures.py
@@ -38,7 +38,6 @@
 from nova.objects import service as service_obj
 from nova import test
 from nova.tests import fixtures
 from nova.tests.unit import conf_fixture
-from nova.tests.unit import policy_fixture
 from nova import utils
 
 CONF = cfg.CONF
@@ -475,34 +474,6 @@
         self.assertIs(mock.sentinel.context, c)
 
-
-class TestPlacementFixture(testtools.TestCase):
-    def setUp(self):
-        super(TestPlacementFixture, self).setUp()
-        # We need ConfFixture since PlacementPolicyFixture reads from config.
-        self.useFixture(conf_fixture.ConfFixture())
-        # We need PlacementPolicyFixture because placement-api checks policy.
-        self.useFixture(policy_fixture.PlacementPolicyFixture())
-        # Database is needed to start placement API
-        self.useFixture(fixtures.Database(database='placement'))
-
-    def test_responds_to_version(self):
-        """Ensure the Placement server responds to calls sensibly."""
-        placement_fixture = self.useFixture(fixtures.PlacementFixture())
-
-        # request the API root, which provides us the versions of the API
-        resp = placement_fixture._fake_get(None, '/')
-        self.assertEqual(200, resp.status_code)
-
-        # request a known bad url, and we should get a 404
-        resp = placement_fixture._fake_get(None, '/foo')
-        self.assertEqual(404, resp.status_code)
-
-        # unsets the token so we fake missing it
-        placement_fixture.token = None
-        resp = placement_fixture._fake_get(None, '/foo')
-        self.assertEqual(401, resp.status_code)
-
-
 class TestWarningsFixture(test.TestCase):
     def test_invalid_uuid_errors(self):
         """Creating an oslo.versionedobject with an invalid UUID value for a
diff --git a/tox.ini b/tox.ini
index e3e88f6ab7df..0c32d3b97eb6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -73,9 +73,23 @@ commands =
 [testenv:functional]
 # TODO(melwitt): This can be removed when functional tests are gating with
 # python 3.x
+# NOTE(cdent): For a while, we shared functional virtualenvs with the unit
+# tests, to save some time. However, this conflicts with tox siblings in zuul,
+# and we need siblings to make testing against master of other projects work.
 basepython = python2.7
-envdir = {toxworkdir}/py27
 setenv = {[testenv]setenv}
+# As nova functional tests import the PlacementFixture from the placement
+# repository these tests are, by default, set up to run with latest master from
+# the placement repo. In the gate, Zuul will clone the latest master from
+# placement OR the version of placement the Depends-On in the commit message
+# suggests. If you want to run the test locally with an un-merged placement
+# change, modify this line locally to point to your dependency or pip install
+# placement into the appropriate tox virtualenv. We express the requirement
+# here instead of test-requirements because we do not want placement present
+# during unit tests.
+deps =
+  -r{toxinidir}/test-requirements.txt
+  git+https://git.openstack.org/openstack/placement#egg=openstack-placement
 commands = {[testenv]commands}
 
 # NOTE(cdent): The group_regex describes how stestr will group tests into the
@@ -93,15 +107,15 @@
 # with python 3.5
 [testenv:functional-py35]
 basepython = python3.5
-envdir = {toxworkdir}/py35
 setenv = {[testenv]setenv}
+deps = {[testenv:functional]deps}
 commands = {[testenv:functional]commands}
 
 [testenv:functional-py36]
 basepython = python3.6
-envdir = {toxworkdir}/py36
 setenv = {[testenv]setenv}
+deps = {[testenv:functional]deps}
 commands = {[testenv:functional]commands}
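For context on what the new deps line buys: with openstack-placement installed in the functional virtualenv, nova's functional tests can stand up a real placement service via the fixture shipped in that package. The sketch below is hypothetical usage; the import path is an assumption based on the PlacementFixture described in the comments above, not a pinned API:

import testtools

# Assumed module path within the external placement package.
from placement.tests.functional.fixtures import placement as placement_fixture


class TestSomethingWithPlacement(testtools.TestCase):
    def setUp(self):
        super(TestSomethingWithPlacement, self).setUp()
        # Runs a placement WSGI app in-process with its own database, so
        # tests exercise live API behavior rather than importing placement
        # internals from the nova tree.
        self.placement_api = self.useFixture(
            placement_fixture.PlacementFixture())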