# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time

import fixtures
from keystoneauth1 import exceptions as ks_exc
import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import six
from six.moves.urllib import parse

import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_requests


CONF = nova.conf.CONF


class SafeConnectedTestCase(test.NoDBTestCase):
    """Test the safe_connect decorator for the scheduler client."""

    def setUp(self):
        super(SafeConnectedTestCase, self).setUp()
        self.context = context.get_admin_context()

        with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'):
            self.client = report.SchedulerReportClient()

    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_endpoint(self, req):
        """Test EndpointNotFound behavior.

        A missing endpoint entry should not explode.
        """
        req.side_effect = ks_exc.EndpointNotFound()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_client')
    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_endpoint_create_client(self, req, create_client):
        """Test EndpointNotFound retry behavior.

        A missing endpoint should cause _create_client to be called.
        """
        req.side_effect = ks_exc.EndpointNotFound()
        self.client._get_resource_provider(self.context, "fake")

        # This is the second time _create_client is called, but the first
        # since the mock was created.
        self.assertTrue(create_client.called)

    @mock.patch('keystoneauth1.session.Session.request')
    def test_missing_auth(self, req):
        """Test Missing Auth handled correctly.

        A missing auth configuration should not explode.

        """
        req.side_effect = ks_exc.MissingAuthPlugin()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')
    def test_unauthorized(self, req):
        """Test Unauthorized handled correctly.

        An unauthorized configuration should not explode.

        """
        req.side_effect = ks_exc.Unauthorized()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch('keystoneauth1.session.Session.request')
    def test_connect_fail(self, req):
        """Test Connect Failure handled correctly.

        If we get a connect failure, this is transient, and we expect
        that this will end up working correctly later.

        """
        req.side_effect = ks_exc.ConnectFailure()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls do
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)

    @mock.patch.object(report, 'LOG')
    def test_warning_limit(self, mock_log):
        # Assert that __init__ initializes _warn_count as we expect
        self.assertEqual(0, self.client._warn_count)
        mock_self = mock.MagicMock()
        mock_self._warn_count = 0
        for i in range(0, report.WARN_EVERY + 3):
            report.warn_limit(mock_self, 'warning')
        mock_log.warning.assert_has_calls([mock.call('warning'),
                                           mock.call('warning')])

    @mock.patch('keystoneauth1.session.Session.request')
    def test_failed_discovery(self, req):
        """Test DiscoveryFailure behavior.

        Failed discovery should not blow up.
        """
        req.side_effect = ks_exc.DiscoveryFailure()
        self.client._get_resource_provider(self.context, "fake")

        # reset the call count to demonstrate that future calls still
        # work
        req.reset_mock()
        self.client._get_resource_provider(self.context, "fake")
        self.assertTrue(req.called)


class TestConstructor(test.NoDBTestCase):
    @mock.patch('keystoneauth1.loading.load_session_from_conf_options')
    @mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
    def test_constructor(self, load_auth_mock, load_sess_mock):
        client = report.SchedulerReportClient()

        load_auth_mock.assert_called_once_with(CONF, 'placement')
        load_sess_mock.assert_called_once_with(CONF, 'placement',
                                               auth=load_auth_mock.return_value)
        self.assertEqual(['internal', 'public'], client._client.interface)
        self.assertEqual({'accept': 'application/json'},
                         client._client.additional_headers)

    @mock.patch('keystoneauth1.loading.load_session_from_conf_options')
    @mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
    def test_constructor_admin_interface(self, load_auth_mock, load_sess_mock):
        self.flags(valid_interfaces='admin', group='placement')
        client = report.SchedulerReportClient()

        load_auth_mock.assert_called_once_with(CONF, 'placement')
        load_sess_mock.assert_called_once_with(CONF, 'placement',
                                               auth=load_auth_mock.return_value)
        self.assertEqual(['admin'], client._client.interface)
        self.assertEqual({'accept': 'application/json'},
                         client._client.additional_headers)


class SchedulerReportClientTestCase(test.NoDBTestCase):

    def setUp(self):
        super(SchedulerReportClientTestCase, self).setUp()
        self.context = context.get_admin_context()
        self.ks_adap_mock = mock.Mock()
        self.compute_node = objects.ComputeNode(
            uuid=uuids.compute_node,
            hypervisor_hostname='foo',
            vcpus=8,
            cpu_allocation_ratio=16.0,
            memory_mb=1024,
            ram_allocation_ratio=1.5,
            local_gb=10,
            disk_allocation_ratio=1.0,
        )

        with test.nested(
                mock.patch('keystoneauth1.adapter.Adapter',
                           return_value=self.ks_adap_mock),
                mock.patch('keystoneauth1.loading.load_auth_from_conf_options')
        ):
            self.client = report.SchedulerReportClient()

    def _init_provider_tree(self, generation_override=None,
                            resources_override=None):
        cn = self.compute_node
        resources = resources_override
        if resources_override is None:
            resources = {
                'VCPU': {
                    'total': cn.vcpus,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn.vcpus,
                    'step_size': 1,
                    'allocation_ratio': cn.cpu_allocation_ratio,
                },
                'MEMORY_MB': {
                    'total': cn.memory_mb,
                    'reserved': 512,
                    'min_unit': 1,
                    'max_unit': cn.memory_mb,
                    'step_size': 1,
                    'allocation_ratio': cn.ram_allocation_ratio,
                },
                'DISK_GB': {
                    'total': cn.local_gb,
                    'reserved': 0,
                    'min_unit': 1,
                    'max_unit': cn.local_gb,
                    'step_size': 1,
                    'allocation_ratio': cn.disk_allocation_ratio,
                },
            }
        generation = generation_override or 1
        rp_uuid = self.client._provider_tree.new_root(
            cn.hypervisor_hostname,
            cn.uuid,
            generation=generation,
        )
        self.client._provider_tree.update_inventory(rp_uuid, resources)

    def _validate_provider(self, name_or_uuid, **kwargs):
        """Validates existence and values of a provider in this client's
        _provider_tree.

        :param name_or_uuid: The name or UUID of the provider to validate.
        :param kwargs: Optional keyword arguments of ProviderData attributes
                       whose values are to be validated.
        """
        found = self.client._provider_tree.data(name_or_uuid)
        # If kwargs provided, their names indicate ProviderData attributes
        for attr, expected in kwargs.items():
            try:
                self.assertEqual(getattr(found, attr), expected)
            except AttributeError:
                self.fail("Provider with name or UUID %s doesn't have "
                          "attribute %s (expected value: %s)" %
                          (name_or_uuid, attr, expected))


class TestPutAllocations(SchedulerReportClientTestCase):
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations(self, mock_put):
        mock_put.return_value.status_code = 204
        mock_put.return_value.text = "cool"
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id,
                                           mock.sentinel.consumer_generation)
        self.assertTrue(resp)
        mock_put.assert_called_once_with(
            expected_url, mock.ANY, version='1.28',
            global_request_id=self.context.global_id)

    @mock.patch.object(report.LOG, 'warning')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_fail(self, mock_put, mock_warn):
        mock_put.return_value.status_code = 400
        mock_put.return_value.text = "not cool"
        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id,
                                           mock.sentinel.consumer_generation)
        self.assertFalse(resp)
        mock_put.assert_called_once_with(
            expected_url, mock.ANY, version='1.28',
            global_request_id=self.context.global_id)
        log_msg = mock_warn.call_args[0][0]
        self.assertIn("Failed to save allocation for", log_msg)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_fail_due_to_consumer_generation_conflict(
            self, mock_put):
        mock_put.return_value = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': 'consumer generation conflict'}]}))

        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        self.assertRaises(exception.AllocationUpdateFailed,
                          self.client.put_allocations,
                          self.context, rp_uuid,
                          consumer_uuid, data,
                          mock.sentinel.project_id,
                          mock.sentinel.user_id,
                          mock.sentinel.consumer_generation)

        mock_put.assert_called_once_with(
            expected_url, mock.ANY, version='1.28',
            global_request_id=self.context.global_id)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_retries_conflict(self, mock_put):
        failed = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': ''}]}))

        succeeded = mock.MagicMock()
        succeeded.status_code = 204

        mock_put.side_effect = (failed, succeeded)

        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id,
                                           mock.sentinel.consumer_generation)
        self.assertTrue(resp)
        mock_put.assert_has_calls([
            mock.call(expected_url, mock.ANY, version='1.28',
                      global_request_id=self.context.global_id)] * 2)

    @mock.patch('time.sleep', new=mock.Mock())
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
    def test_put_allocations_retry_gives_up(self, mock_put):

        failed = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': ''}]}))

        mock_put.return_value = failed

        rp_uuid = mock.sentinel.rp
        consumer_uuid = mock.sentinel.consumer
        data = {"MEMORY_MB": 1024}
        expected_url = "/allocations/%s" % consumer_uuid
        resp = self.client.put_allocations(self.context, rp_uuid,
                                           consumer_uuid, data,
                                           mock.sentinel.project_id,
                                           mock.sentinel.user_id,
                                           mock.sentinel.consumer_generation)
        self.assertFalse(resp)
        mock_put.assert_has_calls([
            mock.call(expected_url, mock.ANY, version='1.28',
                      global_request_id=self.context.global_id)] * 3)

    def test_claim_resources_success(self):
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {'allocations': {
            rp_uuid: alloc
            for rp_uuid, alloc in alloc_req['allocations'].items()}}
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            headers={'X-Openstack-Request-Id': self.context.global_id})

        self.assertTrue(res)

    def test_claim_resources_older_alloc_req(self):
        """Test the case when a stale allocation request is sent to the report
        client to claim
        """
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.12')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                rp_uuid: res
                for rp_uuid, res in alloc_req['allocations'].items()},
            # no consumer generation in the payload as the caller requested
            # older microversion to be used
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.12', json=expected_payload,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertTrue(res)

    def test_claim_resources_success_resize_to_same_host_no_shared(self):
        """Tests resize to the same host operation. In this case allocation
        exists against the same host RP but with the migration_uuid.
        """
        get_current_allocations_resp_mock = mock.Mock(status_code=200)
        # source host allocation held by the migration_uuid so it is not
        # returned to the claim code as that asks for the instance_uuid
        # consumer
        get_current_allocations_resp_mock.json.return_value = {
            'allocations': {},
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
        put_allocations_resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
        # are all being increased but on the same host. We also throw a custom
        # resource class in the new allocation to make sure it's not lost
        alloc_req = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'DISK_GB': 40,
                        'CUSTOM_FOO': 1
                    }
                },
            },
            # this allocation request comes from the scheduler therefore it
            # does not have consumer_generation in it.
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'DISK_GB': 40,
                        'CUSTOM_FOO': 1
                    }
                },
            },
            # report client assumes a new consumer in this case
            'consumer_generation': None,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)

    def test_claim_resources_success_resize_to_same_host_with_shared(self):
        """Tests resize to the same host operation. In this case allocation
        exists against the same host RP and the shared RP but with the
        migration_uuid.
        """
        get_current_allocations_resp_mock = mock.Mock(status_code=200)
        # source host allocation held by the migration_uuid so it is not
        # returned to the claim code as that asks for the instance_uuid
        # consumer
        get_current_allocations_resp_mock.json.return_value = {
            'allocations': {},
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        self.ks_adap_mock.get.return_value = get_current_allocations_resp_mock
        put_allocations_resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
        # are all being increased but on the same host. We also throw a custom
        # resource class in the new allocation to make sure it's not lost
        alloc_req = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'CUSTOM_FOO': 1
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 40,
                    }
                },
            },
            # this allocation request comes from the scheduler therefore it
            # does not have consumer_generation in it.
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                uuids.same_host: {
                    'resources': {
                        'VCPU': 2,
                        'MEMORY_MB': 2048,
                        'CUSTOM_FOO': 1
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 40,
                    }
                },
            },
            # report client assumes a new consumer in this case
            'consumer_generation': None,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)

    def test_claim_resources_success_evacuate_no_shared(self):
        """Tests non-forced evacuate. In this case both the source and the
        dest allocation are held by the instance_uuid in placement. So the
        claim code needs to merge allocations. The second claim comes from the
        scheduler and therefore it does not have consumer_generation in it.
        """
        # the source allocation is also held by the instance_uuid so report
        # client will see it.
        current_allocs = {
            'allocations': {
                uuids.source_host: {
                    'generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
            },
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=200,
            content=jsonutils.dumps(current_allocs))
        put_allocations_resp_mock = fake_requests.FakeResponse(status_code=204)
        self.ks_adap_mock.put.return_value = put_allocations_resp_mock
        consumer_uuid = uuids.consumer_uuid
        # this is an evacuate so we have the same resources request towards the
        # dest host
        alloc_req = {
            'allocations': {
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20,
                    }
                },
            },
            # this allocation request comes from the scheduler therefore it
            # does not have consumer_generation in it.
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        # we expect that both the source and dest allocations are here
        expected_payload = {
            'allocations': {
                uuids.source_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20,
                    }
                },
            },
            # report client uses the consumer_generation that it got from
            # placement when asked for the existing allocations
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)

    def test_claim_resources_success_evacuate_with_shared(self):
"""Similar test that test_claim_resources_success_evacuate_no_shared |
|
but adds shared disk into the mix. |
|
""" |
|
        # the source allocation is also held by the instance_uuid so report
        # client will see it.
        current_allocs = {
            'allocations': {
                uuids.source_host: {
                    'generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }
        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=200,
            content=jsonutils.dumps(current_allocs))
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        # this is an evacuate so we have the same resources request towards the
        # dest host
        alloc_req = {
            'allocations': {
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            # this allocation request comes from the scheduler therefore it
            # does not have consumer_generation in it.
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        # we expect that both the source and dest allocations are here plus the
        # shared storage allocation
        expected_payload = {
            'allocations': {
                uuids.source_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            # report client uses the consumer_generation that it got from
            # placement when asked for the existing allocations
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)

    def test_claim_resources_success_force_evacuate_no_shared(self):
        """Tests forced evacuate. In this case both the source and the
        dest allocation are held by the instance_uuid in placement. So the
        claim code needs to merge allocations. The second claim comes from the
        conductor and therefore it does have consumer_generation in it.
        """
        # the source allocation is also held by the instance_uuid so report
        # client will see it.
        current_allocs = {
            'allocations': {
                uuids.source_host: {
                    'generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
            },
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=200,
            content=jsonutils.dumps(current_allocs))
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        # this is an evacuate so we have the same resources request towards the
        # dest host
        alloc_req = {
            'allocations': {
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20,
                    }
                },
            },
            # this allocation request comes from the conductor that read the
            # allocation from placement therefore it has consumer_generation in
            # it.
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        # we expect that both the source and dest allocations are here
        expected_payload = {
            'allocations': {
                uuids.source_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20
                    },
                },
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                        'DISK_GB': 20,
                    }
                },
            },
            # report client uses the consumer_generation that it got in the
            # allocation request
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)

    def test_claim_resources_success_force_evacuate_with_shared(self):
"""Similar test that |
|
test_claim_resources_success_force_evacuate_no_shared but adds shared |
|
disk into the mix. |
|
""" |
|
        # the source allocation is also held by the instance_uuid so report
        # client will see it.
        current_allocs = {
            'allocations': {
                uuids.source_host: {
                    'generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=200,
            content=jsonutils.dumps(current_allocs))
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        # this is an evacuate so we have the same resources request towards the
        # dest host
        alloc_req = {
            'allocations': {
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.shared_storage: {
                    'generation': 42,
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            # this allocation request comes from the conductor that read the
            # allocation from placement therefore it has consumer_generation in
            # it.
            "consumer_generation": 1,
            "project_id": uuids.project_id,
            "user_id": uuids.user_id
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        # we expect that both the source and dest allocations are here plus the
        # shared storage allocation
        expected_payload = {
            'allocations': {
                uuids.source_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
                uuids.dest_host: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
                uuids.shared_storage: {
                    'resources': {
                        'DISK_GB': 20,
                    },
                },
            },
            # report client uses the consumer_generation that it got in the
            # allocation request
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)

        self.assertTrue(res)

    @mock.patch('time.sleep', new=mock.Mock())
    def test_claim_resources_fail_due_to_rp_generation_retry_success(self):
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mocks = [
            fake_requests.FakeResponse(
                409,
                jsonutils.dumps(
                    {'errors': [
                        {'code': 'placement.concurrent_update',
                         'detail': ''}]})),
            fake_requests.FakeResponse(204)
        ]
        self.ks_adap_mock.put.side_effect = resp_mocks
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations':
                {rp_uuid: res
                 for rp_uuid, res in alloc_req['allocations'].items()}
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        expected_payload['consumer_generation'] = None
        # We should have exactly two calls to the placement API that look
        # identical since we're retrying the same HTTP request
        expected_calls = [
            mock.call(expected_url, microversion='1.28', json=expected_payload,
                      headers={'X-Openstack-Request-Id':
                               self.context.global_id})] * 2
        self.assertEqual(len(expected_calls),
                         self.ks_adap_mock.put.call_count)
        self.ks_adap_mock.put.assert_has_calls(expected_calls)

        self.assertTrue(res)

    @mock.patch.object(report.LOG, 'warning')
    def test_claim_resources_failure(self, mock_log):
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = fake_requests.FakeResponse(
            409,
            jsonutils.dumps(
                {'errors': [
                    {'code': 'something else',
                     'detail': 'not cool'}]}))

        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.claim_resources(self.context, consumer_uuid,
                                          alloc_req, project_id, user_id,
                                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations':
                {rp_uuid: res
                 for rp_uuid, res in alloc_req['allocations'].items()}
        }
        expected_payload['project_id'] = project_id
        expected_payload['user_id'] = user_id
        expected_payload['consumer_generation'] = None
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=expected_payload,
            headers={'X-Openstack-Request-Id': self.context.global_id})

        self.assertFalse(res)
        self.assertTrue(mock_log.called)

    def test_claim_resources_consumer_generation_failure(self):
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.return_value = {
            'allocations': {},  # build instance, not move
        }
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = fake_requests.FakeResponse(
            409,
            jsonutils.dumps(
                {'errors': [
                    {'code': 'placement.concurrent_update',
                     'detail': 'consumer generation conflict'}]}))

        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        alloc_req = {
            'allocations': {
                uuids.cn1: {
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    }
                },
            },
        }

        project_id = uuids.project_id
        user_id = uuids.user_id
        self.assertRaises(exception.AllocationUpdateFailed,
                          self.client.claim_resources, self.context,
                          consumer_uuid, alloc_req, project_id, user_id,
                          allocation_request_version='1.28')

        expected_url = "/allocations/%s" % consumer_uuid
        expected_payload = {
            'allocations': {
                rp_uuid: res
                for rp_uuid, res in alloc_req['allocations'].items()},
            'project_id': project_id,
            'user_id': user_id,
            'consumer_generation': None}
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=expected_payload,
            headers={'X-Openstack-Request-Id': self.context.global_id})

    def test_remove_provider_from_inst_alloc_no_shared(self):
        """Tests that the method which manipulates an existing doubled-up
        allocation for a move operation to remove the source host results in
        sending placement the proper payload to PUT
        /allocations/{consumer_uuid} call.
        """
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.side_effect = [
            {
                'allocations': {
                    uuids.source: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                    uuids.destination: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                },
                'consumer_generation': 1,
                'project_id': uuids.project_id,
                'user_id': uuids.user_id,
            },
            # the second get is for resource providers in the compute tree,
            # return just the compute
            {
                "resource_providers": [
                    {
                        "uuid": uuids.source,
                    },
                ]
            },
        ]
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source)

        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        expected_payload = {
            'allocations': {
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id
        }
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})

        self.assertTrue(res)

    def test_remove_provider_from_inst_alloc_with_shared(self):
        """Tests that the method which manipulates an existing doubled-up
        allocation with DISK_GB being consumed from a shared storage provider
        for a move operation to remove the source host results in sending
        placement the proper payload to PUT /allocations/{consumer_uuid}
        call.
        """
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.side_effect = [
            {
                'allocations': {
                    uuids.source: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                    uuids.shared_storage: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'DISK_GB': 100,
                        },
                    },
                    uuids.destination: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                },
                'consumer_generation': 1,
                'project_id': uuids.project_id,
                'user_id': uuids.user_id,
            },
            # the second get is for resource providers in the compute tree,
            # return just the compute
            {
                "resource_providers": [
                    {
                        "uuid": uuids.source,
                    },
                ]
            },
        ]
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=204)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source)

        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        expected_payload = {
            'allocations': {
                uuids.shared_storage: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'DISK_GB': 100,
                    },
                },
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id
        }
        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})

        self.assertTrue(res)

    def test_remove_provider_from_inst_alloc_no_source(self):
        """Tests that if remove_provider_tree_from_instance_allocation() fails
        to find any allocations for the source host, it just returns True and
        does not attempt to rewrite the allocation for the consumer.
        """
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.side_effect = [
            # Act like the allocations already did not include the source host
            # for some reason
            {
                'allocations': {
                    uuids.shared_storage: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'DISK_GB': 100,
                        },
                    },
                    uuids.destination: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                },
                'consumer_generation': 1,
                'project_id': uuids.project_id,
                'user_id': uuids.user_id,
            },
            # the second get is for resource providers in the compute tree,
            # return just the compute
            {
                "resource_providers": [
                    {
                        "uuid": uuids.source,
                    },
                ]
            },
        ]
        self.ks_adap_mock.get.return_value = get_resp_mock
        consumer_uuid = uuids.consumer_uuid
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source)

        self.ks_adap_mock.get.assert_called()
        self.ks_adap_mock.put.assert_not_called()

        self.assertTrue(res)

    def test_remove_provider_from_inst_alloc_fail_get_allocs(self):
        self.ks_adap_mock.get.return_value = fake_requests.FakeResponse(
            status_code=500)
        consumer_uuid = uuids.consumer_uuid
        self.assertRaises(
            exception.ConsumerAllocationRetrievalFailed,
            self.client.remove_provider_tree_from_instance_allocation,
            self.context, consumer_uuid, uuids.source)

        self.ks_adap_mock.get.assert_called()
        self.ks_adap_mock.put.assert_not_called()

    def test_remove_provider_from_inst_alloc_consumer_gen_conflict(self):
        get_resp_mock = mock.Mock(status_code=200)
        get_resp_mock.json.side_effect = [
            {
                'allocations': {
                    uuids.source: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                    uuids.destination: {
                        'resource_provider_generation': 42,
                        'resources': {
                            'VCPU': 1,
                            'MEMORY_MB': 1024,
                        },
                    },
                },
                'consumer_generation': 1,
                'project_id': uuids.project_id,
                'user_id': uuids.user_id,
            },
            # the second get is for resource providers in the compute tree,
            # return just the compute
            {
                "resource_providers": [
                    {
                        "uuid": uuids.source,
                    },
                ]
            },
        ]
        self.ks_adap_mock.get.return_value = get_resp_mock
        resp_mock = mock.Mock(status_code=409)
        self.ks_adap_mock.put.return_value = resp_mock
        consumer_uuid = uuids.consumer_uuid
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source)

        self.assertFalse(res)

    def test_remove_provider_tree_from_inst_alloc_nested(self):
        self.ks_adap_mock.get.side_effect = [
            fake_requests.FakeResponse(
                status_code=200,
                content=jsonutils.dumps(
                    {
                        'allocations': {
                            uuids.source_compute: {
                                'resource_provider_generation': 42,
                                'resources': {
                                    'VCPU': 1,
                                    'MEMORY_MB': 1024,
                                },
                            },
                            uuids.source_nested: {
                                'resource_provider_generation': 42,
                                'resources': {
                                    'CUSTOM_MAGIC': 1
                                },
                            },
                            uuids.destination: {
                                'resource_provider_generation': 42,
                                'resources': {
                                    'VCPU': 1,
                                    'MEMORY_MB': 1024,
                                },
                            },
                        },
                        'consumer_generation': 1,
                        'project_id': uuids.project_id,
                        'user_id': uuids.user_id,
                    })),
            # the second get is for resource providers in the compute tree,
            # return both RPs in the source compute tree
            fake_requests.FakeResponse(
                status_code=200,
                content=jsonutils.dumps(
                    {
                        "resource_providers": [
                            {
                                "uuid": uuids.source_compute,
                            },
                            {
                                "uuid": uuids.source_nested,
                            },
                        ]
                    }))
        ]
        self.ks_adap_mock.put.return_value = fake_requests.FakeResponse(
            status_code=204)
        consumer_uuid = uuids.consumer_uuid
        project_id = uuids.project_id
        user_id = uuids.user_id
        res = self.client.remove_provider_tree_from_instance_allocation(
            self.context, consumer_uuid, uuids.source_compute)

        expected_url = "/allocations/%s" % consumer_uuid
        # New allocations should only include the destination...
        expected_payload = {
            'allocations': {
                uuids.destination: {
                    'resource_provider_generation': 42,
                    'resources': {
                        'VCPU': 1,
                        'MEMORY_MB': 1024,
                    },
                },
            },
            'consumer_generation': 1,
            'project_id': project_id,
            'user_id': user_id
        }

        self.assertEqual(
            [
                mock.call(
                    '/allocations/%s' % consumer_uuid,
                    headers=mock.ANY,
                    microversion='1.28'
                ),
                mock.call(
                    '/resource_providers?in_tree=%s' % uuids.source_compute,
                    headers=mock.ANY,
                    microversion='1.14'
                )
            ],
            self.ks_adap_mock.get.mock_calls)

        # We have to pull the json body from the mock call_args to validate
        # it separately otherwise hash seed issues get in the way.
        actual_payload = self.ks_adap_mock.put.call_args[1]['json']
        self.assertEqual(expected_payload, actual_payload)
        self.ks_adap_mock.put.assert_called_once_with(
            expected_url, microversion='1.28', json=mock.ANY,
            headers={'X-Openstack-Request-Id': self.context.global_id})

        self.assertTrue(res)


class TestMoveAllocations(SchedulerReportClientTestCase):

    def setUp(self):
        super(TestMoveAllocations, self).setUp()
        # We want to reuse the mock throughout the class, but with
        # different return values.
        patcher = mock.patch(
            'nova.scheduler.client.report.SchedulerReportClient.post')
        self.mock_post = patcher.start()
        self.addCleanup(patcher.stop)
        self.mock_post.return_value.status_code = 204
        self.rp_uuid = mock.sentinel.rp
        self.consumer_uuid = mock.sentinel.consumer
        self.data = {"MEMORY_MB": 1024}
        patcher = mock.patch(
            'nova.scheduler.client.report.SchedulerReportClient.get')
        self.mock_get = patcher.start()
        self.addCleanup(patcher.stop)

        self.project_id = mock.sentinel.project_id
        self.user_id = mock.sentinel.user_id

        self.mock_post.return_value.status_code = 204
        self.rp_uuid = mock.sentinel.rp
        self.source_consumer_uuid = mock.sentinel.source_consumer
        self.target_consumer_uuid = mock.sentinel.target_consumer
        self.source_consumer_data = {
            "allocations": {
                self.rp_uuid: {
                    "generation": 1,
                    "resources": {
                        "MEMORY_MB": 1024
                    }
                }
            },
            "consumer_generation": 2,
            "project_id": self.project_id,
            "user_id": self.user_id
        }
        self.source_rsp = mock.Mock()
        self.source_rsp.json.return_value = self.source_consumer_data
        self.target_consumer_data = {
            "allocations": {
                self.rp_uuid: {
                    "generation": 1,
                    "resources": {
                        "MEMORY_MB": 2048
                    }
                }
            },
            "consumer_generation": 1,
            "project_id": self.project_id,
            "user_id": self.user_id
        }
        self.target_rsp = mock.Mock()
        self.target_rsp.json.return_value = self.target_consumer_data
        self.mock_get.side_effect = [self.source_rsp, self.target_rsp]
        self.expected_url = '/allocations'
        self.expected_microversion = '1.28'

    def test_url_microversion(self):
        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)

        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, mock.ANY,
            version=self.expected_microversion,
            global_request_id=self.context.global_id)

    def test_move_to_empty_target(self):
        self.target_consumer_data = {"allocations": {}}
        target_rsp = mock.Mock()
        target_rsp.json.return_value = self.target_consumer_data
        self.mock_get.side_effect = [self.source_rsp, target_rsp]

        expected_payload = {
            self.target_consumer_uuid: {
                "allocations": {
                    self.rp_uuid: {
                        "resources": {
                            "MEMORY_MB": 1024
                        },
                        "generation": 1
                    }
                },
                "consumer_generation": None,
                "project_id": self.project_id,
                "user_id": self.user_id,
            },
            self.source_consumer_uuid: {
                "allocations": {},
                "consumer_generation": 2,
                "project_id": self.project_id,
                "user_id": self.user_id,
            }
        }

        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)

        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, expected_payload,
            version=self.expected_microversion,
            global_request_id=self.context.global_id)

    @mock.patch('nova.scheduler.client.report.LOG.info')
    def test_move_from_empty_source(self, mock_info):
        """Tests the case that the target has allocations but the source does
        not so the move_allocations method assumes the allocations were already
        moved and returns True without trying to POST /allocations.
        """
        source_consumer_data = {"allocations": {}}
        source_rsp = mock.Mock()
        source_rsp.json.return_value = source_consumer_data
        self.mock_get.side_effect = [source_rsp, self.target_rsp]

        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)

        self.assertTrue(resp)
        self.mock_post.assert_not_called()
        mock_info.assert_called_once()
        self.assertIn('Allocations not found for consumer',
                      mock_info.call_args[0][0])

    def test_move_to_non_empty_target(self):
        self.mock_get.side_effect = [self.source_rsp, self.target_rsp]

        expected_payload = {
            self.target_consumer_uuid: {
                "allocations": {
                    self.rp_uuid: {
                        "resources": {
                            "MEMORY_MB": 1024
                        },
                        "generation": 1
                    }
                },
                "consumer_generation": 1,
                "project_id": self.project_id,
                "user_id": self.user_id,
            },
            self.source_consumer_uuid: {
                "allocations": {},
                "consumer_generation": 2,
                "project_id": self.project_id,
                "user_id": self.user_id,
            }
        }

        with fixtures.EnvironmentVariable('OS_DEBUG', '1'):
            with nova_fixtures.StandardLogging() as stdlog:
                resp = self.client.move_allocations(
                    self.context, self.source_consumer_uuid,
                    self.target_consumer_uuid)

        self.assertTrue(resp)
        self.mock_post.assert_called_once_with(
            self.expected_url, expected_payload,
            version=self.expected_microversion,
            global_request_id=self.context.global_id)
        self.assertIn('Overwriting current allocation',
                      stdlog.logger.output)

    @mock.patch('time.sleep')
    def test_409_concurrent_provider_update(self, mock_sleep):
        # there will be 1 normal call and 3 retries
        self.mock_get.side_effect = [self.source_rsp, self.target_rsp,
                                     self.source_rsp, self.target_rsp,
                                     self.source_rsp, self.target_rsp,
                                     self.source_rsp, self.target_rsp]
        rsp = fake_requests.FakeResponse(
            409,
            jsonutils.dumps(
                {'errors': [
                    {'code': 'placement.concurrent_update',
                     'detail': ''}]}))

        self.mock_post.return_value = rsp

        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)

        self.assertFalse(resp)
        # Post was attempted four times.
        self.assertEqual(4, self.mock_post.call_count)

    @mock.patch('nova.scheduler.client.report.LOG.warning')
    def test_not_409_failure(self, mock_log):
        error_message = 'placement not there'
        self.mock_post.return_value.status_code = 503
        self.mock_post.return_value.text = error_message

        resp = self.client.move_allocations(
            self.context, self.source_consumer_uuid, self.target_consumer_uuid)

        self.assertFalse(resp)
        args, kwargs = mock_log.call_args
        log_message = args[0]
        log_args = args[1]
        self.assertIn('Unable to post allocations', log_message)
        self.assertEqual(error_message, log_args['text'])

    def test_409_concurrent_consumer_update(self):
        self.mock_post.return_value = fake_requests.FakeResponse(
            status_code=409,
            content=jsonutils.dumps(
                {'errors': [{'code': 'placement.concurrent_update',
                             'detail': 'consumer generation conflict'}]}))

        self.assertRaises(exception.AllocationMoveFailed,
                          self.client.move_allocations, self.context,
                          self.source_consumer_uuid, self.target_consumer_uuid)


class TestProviderOperations(SchedulerReportClientTestCase): |
|
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.' |
|
'_create_resource_provider') |
|
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.' |
|
'_get_inventory') |
|
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.' |
|
'_get_provider_aggregates') |
|
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.' |
|
'get_provider_traits') |
|
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.' |
|
'_get_sharing_providers') |
|
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.' |
|
'_get_providers_in_tree') |
|
def test_ensure_resource_provider_get(self, get_rpt_mock, get_shr_mock, |
|
get_trait_mock, get_agg_mock, get_inv_mock, create_rp_mock): |
|
# No resource provider exists in the client's cache, so validate that |
|
# if we get the resource provider from the placement API that we don't |
|
# try to create the resource provider. |
|
get_rpt_mock.return_value = [{ |
|
'uuid': uuids.compute_node, |
|
'name': mock.sentinel.name, |
|
'generation': 1, |
|
}] |
|
|
|
get_inv_mock.return_value = None |
|
get_agg_mock.return_value = report.AggInfo( |
|
aggregates=set([uuids.agg1]), generation=42) |
|
get_trait_mock.return_value = report.TraitInfo( |
|
traits=set(['CUSTOM_GOLD']), generation=43) |
|
get_shr_mock.return_value = [] |
|
|
|
def assert_cache_contents(): |
|
self.assertTrue( |
|
self.client._provider_tree.exists(uuids.compute_node)) |
|
self.assertTrue( |
|
self.client._provider_tree.in_aggregates(uuids.compute_node, |
|
[uuids.agg1])) |
|
self.assertFalse( |
|
self.client._provider_tree.in_aggregates(uuids.compute_node, |
|
[uuids.agg2])) |
|
self.assertTrue( |
|
self.client._provider_tree.has_traits(uuids.compute_node, |
|
['CUSTOM_GOLD'])) |
|
self.assertFalse( |
|
self.client._provider_tree.has_traits(uuids.compute_node, |
|
['CUSTOM_SILVER'])) |
|
data = self.client._provider_tree.data(uuids.compute_node) |
|
self.assertEqual(43, data.generation) |
|
|
|
        self.client._ensure_resource_provider(self.context, uuids.compute_node)

        assert_cache_contents()
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        get_agg_mock.assert_called_once_with(self.context, uuids.compute_node)
        get_trait_mock.assert_called_once_with(self.context,
                                               uuids.compute_node)
        get_shr_mock.assert_called_once_with(self.context, set([uuids.agg1]))
        self.assertFalse(create_rp_mock.called)

        # Now that the cache is populated, a subsequent call should be a no-op.
        get_rpt_mock.reset_mock()
        get_agg_mock.reset_mock()
        get_trait_mock.reset_mock()
        get_shr_mock.reset_mock()

        self.client._ensure_resource_provider(self.context, uuids.compute_node)

        assert_cache_contents()
        get_rpt_mock.assert_not_called()
        get_agg_mock.assert_not_called()
        get_trait_mock.assert_not_called()
        get_shr_mock.assert_not_called()
        create_rp_mock.assert_not_called()

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_create_fail(self, get_rpt_mock,
            refresh_mock, create_rp_mock):
        # No resource provider exists in the client's cache, and
        # _create_resource_provider raises, indicating there was an error
        # with the create call. Ensure we don't populate the resource
        # provider cache.
        get_rpt_mock.return_value = []
        create_rp_mock.side_effect = exception.ResourceProviderCreationFailed(
            name=uuids.compute_node)

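        # The creation failure should bubble up through
        # _ensure_resource_provider unchanged.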
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)

        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
        self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
        self.assertFalse(refresh_mock.called)
        self.assertRaises(
            ValueError,
            self.client._provider_tree.in_aggregates, uuids.compute_node, [])
        self.assertRaises(
            ValueError,
            self.client._provider_tree.has_traits, uuids.compute_node, [])

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider', return_value=None)
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_create_no_placement(self, get_rpt_mock,
            refresh_mock, create_rp_mock):
        # No resource provider exists in the client's cache, and
        # @safe_connect on _create_resource_provider returns None because
        # Placement isn't running yet. Ensure we don't populate the resource
        # provider cache.
        get_rpt_mock.return_value = []

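        # A None return from _create_resource_provider (@safe_connect
        # swallowed the error) is treated like a failed create.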
        self.assertRaises(
            exception.ResourceProviderCreationFailed,
            self.client._ensure_resource_provider, self.context,
            uuids.compute_node)

        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context, uuids.compute_node, uuids.compute_node,
            parent_provider_uuid=None)
        self.assertFalse(self.client._provider_tree.exists(uuids.compute_node))
        refresh_mock.assert_not_called()
        self.assertRaises(
            ValueError,
            self.client._provider_tree.in_aggregates, uuids.compute_node, [])
        self.assertRaises(
            ValueError,
            self.client._provider_tree.has_traits, uuids.compute_node, [])

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_and_get_inventory')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_create(self, get_rpt_mock,
                                             refresh_inv_mock,
                                             refresh_assoc_mock,
                                             create_rp_mock):
        # No resource provider exists in the client's cache and no resource
        # provider was returned from the placement API, so verify that in this
        # case we try to create the resource provider via the placement API.
        get_rpt_mock.return_value = []
        create_rp_mock.return_value = {
            'uuid': uuids.compute_node,
            'name': 'compute-name',
            'generation': 1,
        }
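        # _ensure_resource_provider should return the new provider's UUID and
        # cache it without any refresh calls.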
        self.assertEqual(
            uuids.compute_node,
            self.client._ensure_resource_provider(self.context,
                                                  uuids.compute_node))
        self._validate_provider(uuids.compute_node, name='compute-name',
                                generation=1, parent_uuid=None,
                                aggregates=set(), traits=set())

        # We don't refresh for a just-created provider
        refresh_inv_mock.assert_not_called()
        refresh_assoc_mock.assert_not_called()
        get_rpt_mock.assert_called_once_with(self.context, uuids.compute_node)
        create_rp_mock.assert_called_once_with(
            self.context,
            uuids.compute_node,
            uuids.compute_node,  # name param defaults to UUID if None
            parent_provider_uuid=None,
        )
        self.assertTrue(self.client._provider_tree.exists(uuids.compute_node))

        create_rp_mock.reset_mock()

        # Validate the path where we specify a name (don't default to the UUID)
        self.client._ensure_resource_provider(
            self.context, uuids.cn2, 'a-name')
        create_rp_mock.assert_called_once_with(
            self.context, uuids.cn2, 'a-name', parent_provider_uuid=None)

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    def test_ensure_resource_provider_tree(self, get_rpt_mock, create_rp_mock,
                                           refresh_mock):
        """Test _ensure_resource_provider with a tree of providers."""
        def _create_resource_provider(context, uuid, name,
                                      parent_provider_uuid=None):
            """Mock side effect for creating the RP with the specified args."""
            return {
                'uuid': uuid,
                'name': name,
                'generation': 0,
                'parent_provider_uuid': parent_provider_uuid
            }
        create_rp_mock.side_effect = _create_resource_provider

        # We at least have to simulate the part of _refresh_associations that
        # marks a provider as 'seen'
        def mocked_refresh(context, rp_uuid, **kwargs):
            self.client._association_refresh_time[rp_uuid] = time.time()
        refresh_mock.side_effect = mocked_refresh

        # Not initially in the placement database, so we have to create it.
        get_rpt_mock.return_value = []

        # Create the root
        root = self.client._ensure_resource_provider(self.context, uuids.root)
        self.assertEqual(uuids.root, root)

        # Now create a child
        child1 = self.client._ensure_resource_provider(
            self.context, uuids.child1, name='junior',
            parent_provider_uuid=uuids.root)
        self.assertEqual(uuids.child1, child1)

        # If we re-ensure the child, we get the object from the tree, not a
        # newly-created one - i.e. the early .find() works like it should.
        self.assertIs(child1,
                      self.client._ensure_resource_provider(self.context,
                                                            uuids.child1))

        # Make sure we can create a grandchild
        grandchild = self.client._ensure_resource_provider(
            self.context, uuids.grandchild,
            parent_provider_uuid=uuids.child1)
        self.assertEqual(uuids.grandchild, grandchild)

        # Now create a second child of the root and make sure it doesn't wind
        # up in some crazy wrong place like under child1 or grandchild
        child2 = self.client._ensure_resource_provider(
            self.context, uuids.child2, parent_provider_uuid=uuids.root)
        self.assertEqual(uuids.child2, child2)

        all_rp_uuids = [uuids.root, uuids.child1, uuids.child2,
                        uuids.grandchild]

        # At this point we should get all the providers.
        self.assertEqual(
            set(all_rp_uuids),
            set(self.client._provider_tree.get_provider_uuids()))

        # And now _ensure is a no-op because everything is cached
        get_rpt_mock.reset_mock()
        create_rp_mock.reset_mock()
        refresh_mock.reset_mock()

        for rp_uuid in all_rp_uuids:
            self.client._ensure_resource_provider(self.context, rp_uuid)
            get_rpt_mock.assert_not_called()
            create_rp_mock.assert_not_called()
            refresh_mock.assert_not_called()

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    def test_ensure_resource_provider_refresh_fetch(self, mock_ref_assoc,
                                                    mock_gpit):
        """Make sure refreshes are called with the appropriate UUIDs and flags
        when we fetch the provider tree from placement.
        """
        tree_uuids = set([uuids.root, uuids.one, uuids.two])
        mock_gpit.return_value = [{'uuid': u, 'name': u, 'generation': 42}
                                  for u in tree_uuids]
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
        mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_ref_assoc.assert_has_calls(
            [mock.call(self.context, uuid, force=True)
             for uuid in tree_uuids])
        self.assertEqual(tree_uuids,
                         set(self.client._provider_tree.get_provider_uuids()))

    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_get_providers_in_tree')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_create_resource_provider')
    @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
                '_refresh_associations')
    def test_ensure_resource_provider_refresh_create(self, mock_refresh,
                                                     mock_create, mock_gpit):
        """Make sure refresh is not called when we create the RP."""
        mock_gpit.return_value = []
        mock_create.return_value = {'name': 'cn', 'uuid': uuids.cn,
                                    'generation': 42}
        self.assertEqual(uuids.root,
                         self.client._ensure_resource_provider(self.context,
                                                               uuids.root))
        mock_gpit.assert_called_once_with(self.context, uuids.root)
        mock_create.assert_called_once_with(self.context, uuids.root,
                                            uuids.root,
                                            parent_provider_uuid=None)
        mock_refresh.assert_not_called()
        self.assertEqual([uuids.cn],
                         self.client._provider_tree.get_provider_uuids())

    def test_get_allocation_candidates(self):
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'allocation_requests': mock.sentinel.alloc_reqs,
            'provider_summaries': mock.sentinel.p_sums,
        }
        resources = scheduler_utils.ResourceRequest.from_extra_specs({
            'resources:VCPU': '1',
            'resources:MEMORY_MB': '1024',
            'trait:HW_CPU_X86_AVX': 'required',
            'trait:CUSTOM_TRAIT1': 'required',
            'trait:CUSTOM_TRAIT2': 'preferred',
            'trait:CUSTOM_TRAIT3': 'forbidden',
            'trait:CUSTOM_TRAIT4': 'forbidden',
            'resources1:DISK_GB': '30',
            'trait1:STORAGE_DISK_SSD': 'required',
            'resources2:VGPU': '2',
            'trait2:HW_GPU_RESOLUTION_W2560H1600': 'required',
            'trait2:HW_GPU_API_VULKAN': 'required',
            'resources3:SRIOV_NET_VF': '1',
            'resources3:CUSTOM_NET_EGRESS_BYTES_SEC': '125000',
            'group_policy': 'isolate',
            # These are ignored because misspelled, bad value, etc.
            'resources02:CUSTOM_WIDGET': '123',
            'trait:HW_NIC_OFFLOAD_LRO': 'preferred',
            'group_policy3': 'none',
        })
        resources.get_request_group(None).aggregates = [
            ['agg1', 'agg2', 'agg3'], ['agg1', 'agg2']]
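        # Un-numbered resources/trait extra specs map to the plain 'resources'
        # and 'required' query params; numbered suffixes become per-group
        # resourcesN/requiredN params; forbidden traits get a '!' prefix; the
        # aggregates set above becomes member_of; and the limit comes from the
        # default max_placement_results.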
        expected_path = '/allocation_candidates'
        expected_query = [
            ('group_policy', 'isolate'),
            ('limit', '1000'),
            ('member_of', 'in:agg1,agg2'),
            ('member_of', 'in:agg1,agg2,agg3'),
            ('required', 'CUSTOM_TRAIT1,HW_CPU_X86_AVX,!CUSTOM_TRAIT3,'
                         '!CUSTOM_TRAIT4'),
            ('required1', 'STORAGE_DISK_SSD'),
            ('required2', 'HW_GPU_API_VULKAN,HW_GPU_RESOLUTION_W2560H1600'),
            ('resources', 'MEMORY_MB:1024,VCPU:1'),
            ('resources1', 'DISK_GB:30'),
            ('resources2', 'VGPU:2'),
            ('resources3', 'CUSTOM_NET_EGRESS_BYTES_SEC:125000,SRIOV_NET_VF:1')
        ]

        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock

        alloc_reqs, p_sums, allocation_request_version = (
            self.client.get_allocation_candidates(self.context, resources))

        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qsl(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        expected_url = '/allocation_candidates?%s' % parse.urlencode(
            expected_query)
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.31',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
        self.assertEqual(mock.sentinel.p_sums, p_sums)

    def test_get_ac_no_trait_bogus_group_policy_custom_limit(self):
        self.flags(max_placement_results=42, group='scheduler')
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'allocation_requests': mock.sentinel.alloc_reqs,
            'provider_summaries': mock.sentinel.p_sums,
        }
        resources = scheduler_utils.ResourceRequest.from_extra_specs({
            'resources:VCPU': '1',
            'resources:MEMORY_MB': '1024',
            'resources1:DISK_GB': '30',
            'group_policy': 'bogus',
        })
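        # The configured max_placement_results should show up as limit=42;
        # the bogus group_policy value is expected to be dropped.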
        expected_path = '/allocation_candidates'
        expected_query = [
            ('limit', '42'),
            ('resources', 'MEMORY_MB:1024,VCPU:1'),
            ('resources1', 'DISK_GB:30'),
        ]

        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock

        alloc_reqs, p_sums, allocation_request_version = (
            self.client.get_allocation_candidates(self.context, resources))

        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qsl(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        expected_url = '/allocation_candidates?%s' % parse.urlencode(
            expected_query)
        self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.31',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(mock.sentinel.p_sums, p_sums)

    def test_get_allocation_candidates_not_found(self):
        # Ensure get_allocation_candidates() returns None for the allocation
        # requests when the placement API returns a 404.
        resp_mock = mock.Mock(status_code=404)
        self.ks_adap_mock.get.return_value = resp_mock
        expected_path = '/allocation_candidates'
        expected_query = {'resources': ['MEMORY_MB:1024'],
                          'limit': ['100']}

        # Make sure we're also honoring the configured limit
        self.flags(max_placement_results=100, group='scheduler')

        resources = scheduler_utils.ResourceRequest.from_extra_specs(
            {'resources:MEMORY_MB': '1024'})

        res = self.client.get_allocation_candidates(self.context, resources)

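        # Even on a miss, the request should have used the expected query
        # string (including the configured limit) and microversion.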
        self.ks_adap_mock.get.assert_called_once_with(
            mock.ANY, microversion='1.31',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        url = self.ks_adap_mock.get.call_args[0][0]
        split_url = parse.urlsplit(url)
        query = parse.parse_qs(split_url.query)
        self.assertEqual(expected_path, split_url.path)
        self.assertEqual(expected_query, query)
        self.assertIsNone(res[0])

    def test_get_resource_provider_found(self):
        # Ensure _get_resource_provider() returns a dict of resource provider
        # if it finds a resource provider record from the placement API
        uuid = uuids.compute_node
        resp_mock = mock.Mock(status_code=200)
        json_data = {
            'uuid': uuid,
            'name': uuid,
            'generation': 42,
            'parent_provider_uuid': None,
        }
        resp_mock.json.return_value = json_data
        self.ks_adap_mock.get.return_value = resp_mock

        result = self.client._get_resource_provider(self.context, uuid)

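        # The provider dict from placement should come back unmodified; the
        # GET is pinned to microversion 1.14.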
        expected_provider_dict = dict(
            uuid=uuid,
            name=uuid,
            generation=42,
            parent_provider_uuid=None,
        )
        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.14',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertEqual(expected_provider_dict, result)

    def test_get_resource_provider_not_found(self):
        # Ensure _get_resource_provider() just returns None when the placement
        # API doesn't find a resource provider matching a UUID
        resp_mock = mock.Mock(status_code=404)
        self.ks_adap_mock.get.return_value = resp_mock

        uuid = uuids.compute_node
        result = self.client._get_resource_provider(self.context, uuid)

        expected_url = '/resource_providers/' + uuid
        self.ks_adap_mock.get.assert_called_once_with(
            expected_url, microversion='1.14',
            headers={'X-Openstack-Request-Id': self.context.global_id})
        self.assertIsNone(result)

@mock |