Merge "Move test_report_client out of placement namespace"
commit 4c3ac424d8
@@ -1,896 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from keystoneauth1 import adapter
from keystoneauth1 import session
import mock
import requests
from wsgi_intercept import interceptor

from nova.api.openstack.placement import deploy
from nova.compute import provider_tree
from nova import conf
from nova import context
# TODO(cdent): This points to the nova, not placement, exception for
# InvalidResourceClass. This test should probably move out of the
# placement hierarchy since it expects a "standard" placement server
# and is not testing the placement service itself.
from nova import exception
from nova import objects
from nova import rc_fields as fields
from nova.scheduler.client import report
from nova import test
from nova.tests import uuidsentinel as uuids

CONF = conf.CONF


class NoAuthReportClient(report.SchedulerReportClient):
    """A SchedulerReportClient that avoids keystone."""

    def __init__(self):
        super(NoAuthReportClient, self).__init__()
        # Supply our own session so the wsgi-intercept can intercept
        # the right thing. Another option would be to use the direct
        # urllib3 interceptor.
        request_session = requests.Session()
        headers = {
            'x-auth-token': 'admin',
            'OpenStack-API-Version': 'placement latest',
        }
        self._client = adapter.Adapter(
            session.Session(auth=None, session=request_session,
                            additional_headers=headers),
            service_type='placement')


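# NOTE: illustrative sketch only, not part of the original test. The
# NoAuthReportClient above talks to placement through a plain
# requests.Session wrapped in a keystoneauth1 Adapter, which is what lets
# wsgi-intercept serve the traffic in-process, roughly:
#
#     with interceptor.RequestsInterceptor(
#             app=lambda: deploy.loadapp(CONF),
#             url='http://localhost:80/placement'):
#         # requests to that host/port are answered by the in-process
#         # placement WSGI app instead of a real socket
#         requests.get('http://localhost:80/placement/')
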
@mock.patch('nova.compute.utils.is_volume_backed_instance',
            new=mock.Mock(return_value=False))
@mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
@mock.patch('keystoneauth1.session.Session.get_auth_headers',
            new=mock.Mock(return_value={'x-auth-token': 'admin'}))
@mock.patch('keystoneauth1.session.Session.get_endpoint',
            new=mock.Mock(return_value='http://localhost:80/placement'))
class SchedulerReportClientTests(test.TestCase):
    """Set up an intercepted placement API to test against."""

    def setUp(self):
        super(SchedulerReportClientTests, self).setUp()
        self.flags(auth_strategy='noauth2', group='api')

        self.app = lambda: deploy.loadapp(CONF)
        self.client = NoAuthReportClient()
        # TODO(cdent): Port required here to deal with a bug
        # in wsgi-intercept:
        # https://github.com/cdent/wsgi-intercept/issues/41
        self.url = 'http://localhost:80/placement'
        self.compute_uuid = uuids.compute_node
        self.compute_name = 'computehost'
        self.compute_node = objects.ComputeNode(
            uuid=self.compute_uuid,
            hypervisor_hostname=self.compute_name,
            vcpus=2,
            cpu_allocation_ratio=16.0,
            memory_mb=2048,
            ram_allocation_ratio=1.5,
            local_gb=1024,
            disk_allocation_ratio=1.0)

        self.instance_uuid = uuids.inst
        self.instance = objects.Instance(
            uuid=self.instance_uuid,
            project_id=uuids.project,
            user_id=uuids.user,
            flavor=objects.Flavor(root_gb=10,
                                  swap=1,
                                  ephemeral_gb=100,
                                  memory_mb=1024,
                                  vcpus=2,
                                  extra_specs={}))
        self.context = context.get_admin_context()

    def _interceptor(self):
        # Isolate this initialization for maintainability.
        return interceptor.RequestsInterceptor(app=self.app, url=self.url)

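    # The inventory payloads built by hand throughout these tests all share
    # the same shape expected by the placement inventories API: total,
    # reserved, min_unit, max_unit, step_size and allocation_ratio. The
    # builder below is only an illustrative sketch (it is not used by the
    # original tests) showing that shape in one place.
    @staticmethod
    def _inv_record(total, reserved=0, min_unit=1, max_unit=1, step_size=1,
                    allocation_ratio=1.0):
        # e.g. _inv_record(10, max_unit=2, allocation_ratio=10.0) mirrors the
        # VCPU record used in test_ensure_standard_resource_class below.
        return {
            'total': total,
            'reserved': reserved,
            'min_unit': min_unit,
            'max_unit': max_unit,
            'step_size': step_size,
            'allocation_ratio': allocation_ratio,
        }
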
    def test_client_report_smoke(self):
        """Check things go as expected when doing the right things."""
        # TODO(cdent): We should probably also have a test that
        # tests that when allocation or inventory errors happen, we
        # are resilient.
        res_class = fields.ResourceClass.VCPU
        with self._interceptor():
            # When we start out there are no resource providers.
            rp = self.client._get_resource_provider(self.context,
                                                    self.compute_uuid)
            self.assertIsNone(rp)
            rps = self.client._get_providers_in_tree(self.context,
                                                     self.compute_uuid)
            self.assertEqual([], rps)
            # But get_provider_tree_and_ensure_root creates one (via
            # _ensure_resource_provider)
            ptree = self.client.get_provider_tree_and_ensure_root(
                self.context, self.compute_uuid)
            self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())

            # Now let's update status for our compute node.
            self.client.update_compute_node(self.context, self.compute_node)

            # So now we have a resource provider
            rp = self.client._get_resource_provider(self.context,
                                                    self.compute_uuid)
            self.assertIsNotNone(rp)
            rps = self.client._get_providers_in_tree(self.context,
                                                     self.compute_uuid)
            self.assertEqual(1, len(rps))

            # We should also have empty sets of aggregate and trait
            # associations
            self.assertEqual(
                [], self.client._get_sharing_providers(self.context,
                                                       [uuids.agg]))
            self.assertFalse(
                self.client._provider_tree.have_aggregates_changed(
                    self.compute_uuid, []))
            self.assertFalse(
                self.client._provider_tree.have_traits_changed(
                    self.compute_uuid, []))

            # TODO(cdent): change this to use the methods built in
            # to the report client to retrieve inventory?
            inventory_url = ('/resource_providers/%s/inventories' %
                             self.compute_uuid)
            resp = self.client.get(inventory_url)
            inventory_data = resp.json()['inventories']
            self.assertEqual(self.compute_node.vcpus,
                             inventory_data[res_class]['total'])

            # Providers and inventory show up nicely in the provider tree
            ptree = self.client.get_provider_tree_and_ensure_root(
                self.context, self.compute_uuid)
            self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())
            self.assertTrue(ptree.has_inventory(self.compute_uuid))

            # Update allocations with our instance
            self.client.update_instance_allocation(
                self.context, self.compute_node, self.instance, 1)

            # Check that allocations were made
            resp = self.client.get('/allocations/%s' % self.instance_uuid)
            alloc_data = resp.json()['allocations']
            vcpu_data = alloc_data[self.compute_uuid]['resources'][res_class]
            self.assertEqual(2, vcpu_data)

            # Check that usages are up to date
            resp = self.client.get('/resource_providers/%s/usages' %
                                   self.compute_uuid)
            usage_data = resp.json()['usages']
            vcpu_data = usage_data[res_class]
            self.assertEqual(2, vcpu_data)

            # Delete allocations with our instance
            self.client.update_instance_allocation(
                self.context, self.compute_node, self.instance, -1)

            # No usage
            resp = self.client.get('/resource_providers/%s/usages' %
                                   self.compute_uuid)
            usage_data = resp.json()['usages']
            vcpu_data = usage_data[res_class]
            self.assertEqual(0, vcpu_data)

            # Trigger the reporting client deleting all inventory by setting
            # the compute node's CPU, RAM and disk amounts to 0.
            self.compute_node.vcpus = 0
            self.compute_node.memory_mb = 0
            self.compute_node.local_gb = 0
            self.client.update_compute_node(self.context, self.compute_node)

            # Check there's no more inventory records
            resp = self.client.get(inventory_url)
            inventory_data = resp.json()['inventories']
            self.assertEqual({}, inventory_data)

            # Build the provider tree afresh.
            ptree = self.client.get_provider_tree_and_ensure_root(
                self.context, self.compute_uuid)
            # The compute node is still there
            self.assertEqual([self.compute_uuid], ptree.get_provider_uuids())
            # But the inventory is gone
            self.assertFalse(ptree.has_inventory(self.compute_uuid))

            # Try setting some invalid inventory and make sure the report
            # client raises the expected error.
            inv_data = {
                'CUSTOM_BOGU$_CLA$$': {
                    'total': 100,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 100,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.assertRaises(exception.InvalidResourceClass,
                              self.client.set_inventory_for_provider,
                              self.context, self.compute_uuid,
                              self.compute_name, inv_data)

    @mock.patch('nova.compute.utils.is_volume_backed_instance',
                new=mock.Mock(return_value=False))
    @mock.patch('nova.objects.compute_node.ComputeNode.save', new=mock.Mock())
    @mock.patch('keystoneauth1.session.Session.get_auth_headers',
                new=mock.Mock(return_value={'x-auth-token': 'admin'}))
    @mock.patch('keystoneauth1.session.Session.get_endpoint',
                new=mock.Mock(return_value='http://localhost:80/placement'))
    def test_ensure_standard_resource_class(self):
        """Test case for bug #1746615: If placement is running a newer version
        of code than compute, it may have new standard resource classes we
        don't know about. Make sure this scenario doesn't cause errors in
        set_inventory_for_provider.
        """
        inv = {
            'VCPU': {
                'total': 10,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 2,
                'step_size': 1,
                'allocation_ratio': 10.0,
            },
            'MEMORY_MB': {
                'total': 1048576,
                'reserved': 2048,
                'min_unit': 1024,
                'max_unit': 131072,
                'step_size': 1024,
                'allocation_ratio': 1.0,
            },
            'DISK_GB': {
                'total': 100,
                'reserved': 1,
                'min_unit': 1,
                'max_unit': 10,
                'step_size': 2,
                'allocation_ratio': 10.0,
            },
            # A standard resource class known by placement, but not locally
            'PCI_DEVICE': {
                'total': 4,
                'reserved': 0,
                'min_unit': 1,
                'max_unit': 4,
                'step_size': 1,
                'allocation_ratio': 1.0,
            },
            'CUSTOM_BANDWIDTH': {
                'total': 1250000,
                'reserved': 10000,
                'min_unit': 5000,
                'max_unit': 250000,
                'step_size': 5000,
                'allocation_ratio': 8.0,
            },
        }
        with interceptor.RequestsInterceptor(app=self.app, url=self.url):
            self.client.update_compute_node(self.context, self.compute_node)
            self.client.set_inventory_for_provider(
                self.context, self.compute_uuid, self.compute_name, inv)

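    # NOTE: background for the test above. The set of "standard" resource
    # classes (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, ...) is defined by the
    # placement service, while anything site-specific must use the CUSTOM_
    # prefix (e.g. CUSTOM_BANDWIDTH). A newer placement can therefore know
    # standard classes that this older client code does not, which is the
    # scenario exercised above.
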
    @mock.patch('keystoneauth1.session.Session.get_endpoint',
                return_value='http://localhost:80/placement')
    def test_global_request_id(self, mock_endpoint):
        global_request_id = 'req-%s' % uuids.global_request_id

        def assert_app(environ, start_response):
            # Assert the 'X-Openstack-Request-Id' header in the request.
            self.assertIn('HTTP_X_OPENSTACK_REQUEST_ID', environ)
            self.assertEqual(global_request_id,
                             environ['HTTP_X_OPENSTACK_REQUEST_ID'])
            start_response('204 OK', [])
            return []

        with interceptor.RequestsInterceptor(
                app=lambda: assert_app, url=self.url):
            self.client._delete_provider(self.compute_uuid,
                                         global_request_id=global_request_id)
            payload = {
                'name': 'test-resource-provider'
            }
            self.client.post('/resource_providers', payload,
                             global_request_id=global_request_id)
            self.client.put('/resource_providers/%s' % self.compute_uuid,
                            payload,
                            global_request_id=global_request_id)
            self.client.get('/resource_providers/%s' % self.compute_uuid,
                            global_request_id=global_request_id)

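    # NOTE: the global_request_id above follows the usual OpenStack
    # 'req-<uuid>' form and travels between services in the
    # X-Openstack-Request-Id header, which shows up in the WSGI environ as
    # HTTP_X_OPENSTACK_REQUEST_ID; that is exactly what assert_app checks.
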
    def test_get_provider_tree_with_nested_and_aggregates(self):
        """A more in-depth test of get_provider_tree_and_ensure_root with
        nested and sharing resource providers.

              ss1(DISK)    ss2(DISK)        ss3(DISK)
           agg_disk_1 \    / agg_disk_2        | agg_disk_3
               cn(VCPU,MEM,DISK)               x
                    /    \
            pf1(VF,BW)    pf2(VF,BW)        sbw(BW)
              agg_ip \    / agg_ip             | agg_bw
                    sip(IP)                    x

        """
        with self._interceptor():
            # Register the compute node and its inventory
            self.client.update_compute_node(self.context, self.compute_node)
            # The compute node is associated with two of the shared storages
            self.client.set_aggregates_for_provider(
                self.context, self.compute_uuid,
                set([uuids.agg_disk_1, uuids.agg_disk_2]))

            # Register two SR-IOV PFs with VF and bandwidth inventory
            for x in (1, 2):
                name = 'pf%d' % x
                uuid = getattr(uuids, name)
                self.client.set_inventory_for_provider(
                    self.context, uuid, name, {
                        fields.ResourceClass.SRIOV_NET_VF: {
                            'total': 24 * x,
                            'reserved': x,
                            'min_unit': 1,
                            'max_unit': 24 * x,
                            'step_size': 1,
                            'allocation_ratio': 1.0,
                        },
                        'CUSTOM_BANDWIDTH': {
                            'total': 125000 * x,
                            'reserved': 1000 * x,
                            'min_unit': 5000,
                            'max_unit': 25000 * x,
                            'step_size': 5000,
                            'allocation_ratio': 1.0,
                        },
                    }, parent_provider_uuid=self.compute_uuid)
                # They're associated with an IP address aggregate
                self.client.set_aggregates_for_provider(self.context, uuid,
                                                        [uuids.agg_ip])
                # Set some traits on 'em
                self.client.set_traits_for_provider(
                    self.context, uuid, ['CUSTOM_PHYSNET_%d' % x])

            # Register three shared storage pools with disk inventory
            for x in (1, 2, 3):
                name = 'ss%d' % x
                uuid = getattr(uuids, name)
                self.client.set_inventory_for_provider(
                    self.context, uuid, name, {
                        fields.ResourceClass.DISK_GB: {
                            'total': 100 * x,
                            'reserved': x,
                            'min_unit': 1,
                            'max_unit': 10 * x,
                            'step_size': 2,
                            'allocation_ratio': 10.0,
                        },
                    })
                # Mark as a sharing provider
                self.client.set_traits_for_provider(
                    self.context, uuid, ['MISC_SHARES_VIA_AGGREGATE'])
                # Associate each with its own aggregate. The compute node is
                # associated with the first two (agg_disk_1 and agg_disk_2).
                agg = getattr(uuids, 'agg_disk_%d' % x)
                self.client.set_aggregates_for_provider(self.context, uuid,
                                                        [agg])

            # Register a shared IP address provider with IP address inventory
            self.client.set_inventory_for_provider(
                self.context, uuids.sip, 'sip', {
                    fields.ResourceClass.IPV4_ADDRESS: {
                        'total': 128,
                        'reserved': 0,
                        'min_unit': 1,
                        'max_unit': 8,
                        'step_size': 1,
                        'allocation_ratio': 1.0,
                    },
                })
            # Mark as a sharing provider, and add another trait
            self.client.set_traits_for_provider(
                self.context, uuids.sip,
                set(['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']))
            # It's associated with the same aggregate as both PFs
            self.client.set_aggregates_for_provider(self.context, uuids.sip,
                                                    [uuids.agg_ip])

            # Register a shared network bandwidth provider
            self.client.set_inventory_for_provider(
                self.context, uuids.sbw, 'sbw', {
                    'CUSTOM_BANDWIDTH': {
                        'total': 1250000,
                        'reserved': 10000,
                        'min_unit': 5000,
                        'max_unit': 250000,
                        'step_size': 5000,
                        'allocation_ratio': 8.0,
                    },
                })
            # Mark as a sharing provider
            self.client.set_traits_for_provider(
                self.context, uuids.sbw, ['MISC_SHARES_VIA_AGGREGATE'])
            # It's associated with some other aggregate.
            self.client.set_aggregates_for_provider(self.context, uuids.sbw,
                                                    [uuids.agg_bw])

            # Setup is done. Grab the ProviderTree
            prov_tree = self.client.get_provider_tree_and_ensure_root(
                self.context, self.compute_uuid)

            # All providers show up because we used set_inventory_for_provider
            self.assertEqual(set([self.compute_uuid, uuids.ss1, uuids.ss2,
                                  uuids.pf1, uuids.pf2, uuids.sip, uuids.ss3,
                                  uuids.sbw]),
                             set(prov_tree.get_provider_uuids()))
            # Narrow the field to just our compute subtree.
            self.assertEqual(
                set([self.compute_uuid, uuids.pf1, uuids.pf2]),
                set(prov_tree.get_provider_uuids(self.compute_uuid)))

            # Validate traits for a couple of providers
            self.assertFalse(prov_tree.have_traits_changed(
                uuids.pf2, ['CUSTOM_PHYSNET_2']))
            self.assertFalse(prov_tree.have_traits_changed(
                uuids.sip, ['MISC_SHARES_VIA_AGGREGATE', 'CUSTOM_FOO']))

            # Validate aggregates for a couple of providers
            self.assertFalse(prov_tree.have_aggregates_changed(
                uuids.sbw, [uuids.agg_bw]))
            self.assertFalse(prov_tree.have_aggregates_changed(
                self.compute_uuid, [uuids.agg_disk_1, uuids.agg_disk_2]))

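    # NOTE: in the placement data model a provider carrying the
    # MISC_SHARES_VIA_AGGREGATE trait (ss1-ss3, sip and sbw above) offers its
    # inventory to every provider that shares an aggregate with it, which is
    # how ss1 and ss2 (but not ss3) can serve DISK_GB to the compute node in
    # the diagram at the top of the previous test.
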
    def test__set_inventory_for_provider(self):
        """Tests for SchedulerReportClient._set_inventory_for_provider, NOT
        set_inventory_for_provider.
        """
        with self._interceptor():
            inv = {
                fields.ResourceClass.SRIOV_NET_VF: {
                    'total': 24,
                    'reserved': 1,
                    'min_unit': 1,
                    'max_unit': 24,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            # Provider doesn't exist in our cache
            self.assertRaises(
                ValueError,
                self.client._set_inventory_for_provider,
                self.context, uuids.cn, inv)
            self.assertIsNone(self.client._get_inventory(
                self.context, uuids.cn))

            # Create the provider
            self.client._ensure_resource_provider(self.context, uuids.cn)
            # Still no inventory, but now we don't get a 404
            self.assertEqual(
                {},
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Now set the inventory
            self.client._set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Make sure we can change it
            inv = {
                fields.ResourceClass.SRIOV_NET_VF: {
                    'total': 24,
                    'reserved': 1,
                    'min_unit': 1,
                    'max_unit': 24,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                fields.ResourceClass.IPV4_ADDRESS: {
                    'total': 128,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 8,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            self.client._set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Create custom resource classes on the fly
            self.assertFalse(
                self.client.get('/resource_classes/CUSTOM_BANDWIDTH'))
            inv = {
                fields.ResourceClass.SRIOV_NET_VF: {
                    'total': 24,
                    'reserved': 1,
                    'min_unit': 1,
                    'max_unit': 24,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                fields.ResourceClass.IPV4_ADDRESS: {
                    'total': 128,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 8,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                'CUSTOM_BANDWIDTH': {
                    'total': 1250000,
                    'reserved': 10000,
                    'min_unit': 5000,
                    'max_unit': 250000,
                    'step_size': 5000,
                    'allocation_ratio': 8.0,
                },
            }
            self.client._set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])
            # The custom resource class got created.
            self.assertTrue(
                self.client.get('/resource_classes/CUSTOM_BANDWIDTH'))

            # Creating a bogus resource class raises the appropriate exception.
            bogus_inv = dict(inv)
            bogus_inv['CUSTOM_BOGU$$'] = {
                'total': 1,
                'reserved': 1,
                'min_unit': 1,
                'max_unit': 1,
                'step_size': 1,
                'allocation_ratio': 1.0,
            }
            self.assertRaises(
                exception.InvalidResourceClass,
                self.client._set_inventory_for_provider,
                self.context, uuids.cn, bogus_inv)
            self.assertFalse(
                self.client.get('/resource_classes/BOGUS'))
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Create a generation conflict by doing an "out of band" update
            oob_inv = {
                fields.ResourceClass.IPV4_ADDRESS: {
                    'total': 128,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 8,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            gen = self.client._provider_tree.data(uuids.cn).generation
            self.assertTrue(
                self.client.put(
                    '/resource_providers/%s/inventories' % uuids.cn,
                    {'resource_provider_generation': gen,
                     'inventories': oob_inv}))
            self.assertEqual(
                oob_inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Now try to update again.
            inv = {
                fields.ResourceClass.SRIOV_NET_VF: {
                    'total': 24,
                    'reserved': 1,
                    'min_unit': 1,
                    'max_unit': 24,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
                'CUSTOM_BANDWIDTH': {
                    'total': 1250000,
                    'reserved': 10000,
                    'min_unit': 5000,
                    'max_unit': 250000,
                    'step_size': 5000,
                    'allocation_ratio': 8.0,
                },
            }
            # Cached generation is off, so this will bounce with a conflict.
            self.assertRaises(
                exception.ResourceProviderUpdateConflict,
                self.client._set_inventory_for_provider,
                self.context, uuids.cn, inv)
            # Inventory still corresponds to the out-of-band update
            self.assertEqual(
                oob_inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])
            # Force refresh to get the latest generation
            self.client._refresh_and_get_inventory(self.context, uuids.cn)
            # Now the update should work
            self.client._set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Now set up an InventoryInUse case by creating a VF allocation...
            self.assertTrue(
                self.client.put_allocations(
                    self.context, uuids.cn, uuids.consumer,
                    {fields.ResourceClass.SRIOV_NET_VF: 1},
                    uuids.proj, uuids.user))
            # ...and trying to delete the provider's VF inventory
            bad_inv = {
                'CUSTOM_BANDWIDTH': {
                    'total': 1250000,
                    'reserved': 10000,
                    'min_unit': 5000,
                    'max_unit': 250000,
                    'step_size': 5000,
                    'allocation_ratio': 8.0,
                },
            }
            # Allocation bumped the generation, so refresh to get the latest
            self.client._refresh_and_get_inventory(self.context, uuids.cn)
            self.assertRaises(
                exception.InventoryInUse,
                self.client._set_inventory_for_provider,
                self.context, uuids.cn, bad_inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Same result if we try to clear all the inventory
            bad_inv = {}
            self.assertRaises(
                exception.InventoryInUse,
                self.client._set_inventory_for_provider,
                self.context, uuids.cn, bad_inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

            # Remove the allocation to make it work
            self.client.delete('/allocations/' + uuids.consumer)
            # Force refresh to get the latest generation
            self.client._refresh_and_get_inventory(self.context, uuids.cn)
            inv = {}
            self.client._set_inventory_for_provider(
                self.context, uuids.cn, inv)
            self.assertEqual(
                inv,
                self.client._get_inventory(
                    self.context, uuids.cn)['inventories'])

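    # NOTE: the conflict scenario above relies on placement's optimistic
    # concurrency control. Every write against a resource provider carries
    # the provider's current generation, e.g. (a sketch of the payload used
    # above, not additional test code):
    #
    #     PUT /resource_providers/<uuid>/inventories
    #     {"resource_provider_generation": <gen>, "inventories": {...}}
    #
    # A stale generation is rejected with 409 Conflict, which the report
    # client surfaces as ResourceProviderUpdateConflict until its cache is
    # refreshed.
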
    def test_update_from_provider_tree(self):
        """A "realistic" walk through the lifecycle of a compute node provider
        tree.
        """
        # NOTE(efried): We can use the same ProviderTree throughout, since
        # update_from_provider_tree doesn't change it.
        new_tree = provider_tree.ProviderTree()

        def assert_ptrees_equal():
            uuids = set(self.client._provider_tree.get_provider_uuids())
            self.assertEqual(uuids, set(new_tree.get_provider_uuids()))
            for uuid in uuids:
                cdata = self.client._provider_tree.data(uuid)
                ndata = new_tree.data(uuid)
                self.assertEqual(ndata.name, cdata.name)
                self.assertEqual(ndata.parent_uuid, cdata.parent_uuid)
                self.assertFalse(
                    new_tree.has_inventory_changed(uuid, cdata.inventory))
                self.assertFalse(
                    new_tree.have_traits_changed(uuid, cdata.traits))
                self.assertFalse(
                    new_tree.have_aggregates_changed(uuid, cdata.aggregates))

        # To begin with, the cache should be empty
        self.assertEqual([], self.client._provider_tree.get_provider_uuids())
        # When new_tree is empty, it's a no-op.
        # Do this outside the interceptor to prove no API calls are made.
        self.client.update_from_provider_tree(self.context, new_tree)
        assert_ptrees_equal()

        with self._interceptor():
            # Populate with a provider with no inventories, aggregates, traits
            new_tree.new_root('root', uuids.root)
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Throw in some more providers, in various spots in the tree, with
            # some sub-properties
            new_tree.new_child('child1', uuids.root, uuid=uuids.child1)
            new_tree.update_aggregates('child1', [uuids.agg1, uuids.agg2])
            new_tree.new_child('grandchild1_1', uuids.child1, uuid=uuids.gc1_1)
            new_tree.update_traits(uuids.gc1_1, ['CUSTOM_PHYSNET_2'])
            new_tree.new_root('ssp', uuids.ssp)
            new_tree.update_inventory('ssp', {
                fields.ResourceClass.DISK_GB: {
                    'total': 100,
                    'reserved': 1,
                    'min_unit': 1,
                    'max_unit': 10,
                    'step_size': 2,
                    'allocation_ratio': 10.0,
                },
            })
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Swizzle properties
            # Give the root some everything
            new_tree.update_inventory(uuids.root, {
                fields.ResourceClass.VCPU: {
                    'total': 10,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 2,
                    'step_size': 1,
                    'allocation_ratio': 10.0,
                },
                fields.ResourceClass.MEMORY_MB: {
                    'total': 1048576,
                    'reserved': 2048,
                    'min_unit': 1024,
                    'max_unit': 131072,
                    'step_size': 1024,
                    'allocation_ratio': 1.0,
                },
            })
            new_tree.update_aggregates(uuids.root, [uuids.agg1])
            new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                'HW_CPU_X86_AVX2'])
            # Take away the child's aggregates
            new_tree.update_aggregates(uuids.child1, [])
            # Grandchild gets some inventory
            ipv4_inv = {
                fields.ResourceClass.IPV4_ADDRESS: {
                    'total': 128,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': 8,
                    'step_size': 1,
                    'allocation_ratio': 1.0,
                },
            }
            new_tree.update_inventory('grandchild1_1', ipv4_inv)
            # Shared storage provider gets traits
            new_tree.update_traits('ssp', set(['MISC_SHARES_VIA_AGGREGATE',
                                               'STORAGE_DISK_SSD']))
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Let's go for some error scenarios.
            # Add inventory in an invalid resource class
            new_tree.update_inventory(
                'grandchild1_1',
                dict(ipv4_inv,
                     MOTSUC_BANDWIDTH={
                         'total': 1250000,
                         'reserved': 10000,
                         'min_unit': 5000,
                         'max_unit': 250000,
                         'step_size': 5000,
                         'allocation_ratio': 8.0,
                     }))
            self.assertRaises(
                exception.ResourceProviderSyncFailed,
                self.client.update_from_provider_tree, self.context, new_tree)
            # The inventory update didn't get synced...
            self.assertIsNone(self.client._get_inventory(
                self.context, uuids.grandchild1_1))
            # ...and the grandchild was removed from the cache
            self.assertFalse(
                self.client._provider_tree.exists('grandchild1_1'))

            # Fix that problem so we can try the next one
            new_tree.update_inventory(
                'grandchild1_1',
                dict(ipv4_inv,
                     CUSTOM_BANDWIDTH={
                         'total': 1250000,
                         'reserved': 10000,
                         'min_unit': 5000,
                         'max_unit': 250000,
                         'step_size': 5000,
                         'allocation_ratio': 8.0,
                     }))

            # Add a bogus trait
            new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                'HW_CPU_X86_AVX2',
                                                'MOTSUC_FOO'])
            self.assertRaises(
                exception.ResourceProviderSyncFailed,
                self.client.update_from_provider_tree, self.context, new_tree)
            # Placement didn't get updated
            self.assertEqual(set(['HW_CPU_X86_AVX', 'HW_CPU_X86_AVX2']),
                             self.client._get_provider_traits(self.context,
                                                              uuids.root))
            # ...and the root was removed from the cache
            self.assertFalse(self.client._provider_tree.exists(uuids.root))

            # Fix that problem
            new_tree.update_traits(uuids.root, ['HW_CPU_X86_AVX',
                                                'HW_CPU_X86_AVX2',
                                                'CUSTOM_FOO'])

            # Now the sync should work
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Let's cause a conflict error by doing an "out-of-band" update
            gen = self.client._provider_tree.data(uuids.ssp).generation
            self.assertTrue(self.client.put(
                '/resource_providers/%s/traits' % uuids.ssp,
                {'resource_provider_generation': gen,
                 'traits': ['MISC_SHARES_VIA_AGGREGATE', 'STORAGE_DISK_HDD']},
                version='1.6'))

            # Now if we try to modify the traits, we should fail and
            # invalidate the cache...
            new_tree.update_traits(uuids.ssp, ['MISC_SHARES_VIA_AGGREGATE',
                                               'STORAGE_DISK_SSD',
                                               'CUSTOM_FAST'])
            self.assertRaises(
                exception.ResourceProviderSyncFailed,
                self.client.update_from_provider_tree, self.context, new_tree)
            # ...but the next iteration will refresh the cache with the latest
            # generation and so the next attempt should succeed.
            self.client.update_from_provider_tree(self.context, new_tree)
            # The out-of-band change is blown away, as it should be.
            assert_ptrees_equal()

            # Let's delete some stuff
            new_tree.remove(uuids.ssp)
            self.assertFalse(new_tree.exists('ssp'))
            new_tree.remove('child1')
            self.assertFalse(new_tree.exists('child1'))
            # Removing a node removes its descendants too
            self.assertFalse(new_tree.exists('grandchild1_1'))
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Remove the last provider
            new_tree.remove(uuids.root)
            self.assertEqual([], new_tree.get_provider_uuids())
            self.client.update_from_provider_tree(self.context, new_tree)
            assert_ptrees_equal()

            # Having removed the providers this way, they ought to be gone
            # from placement
            for uuid in (uuids.root, uuids.child1, uuids.grandchild1_1,
                         uuids.ssp):
                resp = self.client.get('/resource_providers/%s' % uuid)
                self.assertEqual(404, resp.status_code)