placement: use the dict format for the allocation in claim_resources
In placement API version 1.12, the dict format is the consistent allocation format across the APIs `PUT /allocations/{consumer_uuid}`, `GET /allocations/{consumer_uuid}` and `GET /allocation_candidates`. This patch switches the claim_resources method to this consistent format. There is one upgrade scenario which needs care: we send alternative hosts to the compute node as a Selection object, and the Selection object includes the JSON dict of the allocation object. That means that if an instance-build attempt happens in the middle of an upgrade, the old-format allocation JSON dict will be sent back to the new-version cell conductor. So the claim_resources method still needs to support the old-version allocation JSON format. Change-Id: I4314ade528c9cb69bbf6c55d31566e1e188fef09
This commit is contained in:
parent
8a307bbdf4
commit
48d1ee1dd0
|
@ -21,6 +21,7 @@ import time
|
|||
from keystoneauth1 import exceptions as ks_exc
|
||||
from oslo_log import log as logging
|
||||
from oslo_middleware import request_id
|
||||
from oslo_utils import versionutils
|
||||
from six.moves.urllib import parse
|
||||
|
||||
from nova.compute import provider_tree
|
||||
|
@ -198,21 +199,17 @@ def _move_operation_alloc_request(source_allocs, dest_alloc_req):
|
|||
# already allocated against on the source host (like shared storage
|
||||
# providers)
|
||||
cur_rp_uuids = set(source_allocs.keys())
|
||||
new_rp_uuids = set(a['resource_provider']['uuid']
|
||||
for a in dest_alloc_req['allocations']) - cur_rp_uuids
|
||||
new_rp_uuids = set(dest_alloc_req['allocations']) - cur_rp_uuids
|
||||
|
||||
current_allocs = [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': cur_rp_uuid,
|
||||
},
|
||||
'resources': alloc['resources'],
|
||||
} for cur_rp_uuid, alloc in source_allocs.items()
|
||||
]
|
||||
current_allocs = {
|
||||
cur_rp_uuid: {'resources': alloc['resources']}
|
||||
for cur_rp_uuid, alloc in source_allocs.items()
|
||||
}
|
||||
new_alloc_req = {'allocations': current_allocs}
|
||||
for alloc in dest_alloc_req['allocations']:
|
||||
if alloc['resource_provider']['uuid'] in new_rp_uuids:
|
||||
new_alloc_req['allocations'].append(alloc)
|
||||
for rp_uuid in dest_alloc_req['allocations']:
|
||||
if rp_uuid in new_rp_uuids:
|
||||
new_alloc_req['allocations'][rp_uuid] = dest_alloc_req[
|
||||
'allocations'][rp_uuid]
|
||||
elif not new_rp_uuids:
|
||||
# If there are no new_rp_uuids that means we're resizing to
|
||||
# the same host so we need to sum the allocations for
|
||||
|
@ -223,14 +220,9 @@ def _move_operation_alloc_request(source_allocs, dest_alloc_req):
|
|||
# the compute node/resource tracker is going to adjust for
|
||||
# decrementing any old allocations as necessary, the scheduler
|
||||
# shouldn't make assumptions about that.
|
||||
for current_alloc in current_allocs:
|
||||
# Find the matching resource provider allocations by UUID.
|
||||
if (current_alloc['resource_provider']['uuid'] ==
|
||||
alloc['resource_provider']['uuid']):
|
||||
# Now sum the current allocation resource amounts with
|
||||
# the new allocation resource amounts.
|
||||
scheduler_utils.merge_resources(current_alloc['resources'],
|
||||
alloc['resources'])
|
||||
scheduler_utils.merge_resources(
|
||||
new_alloc_req['allocations'][rp_uuid]['resources'],
|
||||
dest_alloc_req['allocations'][rp_uuid]['resources'])
|
||||
|
||||
LOG.debug("New allocation_request containing both source and "
|
||||
"destination hosts in move operation: %s", new_alloc_req)
|
||||
|
@ -344,7 +336,7 @@ class SchedulerReportClient(object):
|
|||
'resources': resource_query,
|
||||
}
|
||||
|
||||
version = '1.10'
|
||||
version = '1.12'
|
||||
url = "/allocation_candidates?%s" % parse.urlencode(qs_params)
|
||||
resp = self.get(url, version=version)
|
||||
if resp.status_code == 200:
|
||||
|
@ -1158,11 +1150,28 @@ class SchedulerReportClient(object):
|
|||
:returns: True if the allocations were created, False otherwise.
|
||||
"""
|
||||
# Older clients might not send the allocation_request_version, so
|
||||
# default to 1.10
|
||||
# default to 1.10.
|
||||
# TODO(alex_xu): In the rocky, all the client should send the
|
||||
# allocation_request_version. So remove this default value.
|
||||
allocation_request_version = allocation_request_version or '1.10'
|
||||
# Ensure we don't change the supplied alloc request since it's used in
|
||||
# a loop within the scheduler against multiple instance claims
|
||||
ar = copy.deepcopy(alloc_request)
|
||||
|
||||
# If the allocation_request_version less than 1.12, then convert the
|
||||
# allocation array format to the dict format. This conversion can be
|
||||
# remove in Rocky release.
|
||||
if versionutils.convert_version_to_tuple(
|
||||
allocation_request_version) < (1, 12):
|
||||
ar = {
|
||||
'allocations': {
|
||||
alloc['resource_provider']['uuid']: {
|
||||
'resources': alloc['resources']
|
||||
} for alloc in ar['allocations']
|
||||
}
|
||||
}
|
||||
allocation_request_version = '1.12'
|
||||
|
||||
url = '/allocations/%s' % consumer_uuid
|
||||
|
||||
payload = ar
|
||||
|
|
|
@ -141,8 +141,7 @@ class SchedulerManager(manager.Manager):
|
|||
# a host, we can grab an allocation request easily
|
||||
alloc_reqs_by_rp_uuid = collections.defaultdict(list)
|
||||
for ar in alloc_reqs:
|
||||
for rr in ar['allocations']:
|
||||
rp_uuid = rr['resource_provider']['uuid']
|
||||
for rp_uuid in ar['allocations']:
|
||||
alloc_reqs_by_rp_uuid[rp_uuid].append(ar)
|
||||
|
||||
# Only return alternates if both return_objects and return_alternates
|
||||
|
|
|
@ -369,14 +369,9 @@ def claim_resources_on_destination(
|
|||
if source_node_allocations:
|
||||
# Generate an allocation request for the destination node.
|
||||
alloc_request = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': dest_node.uuid
|
||||
},
|
||||
'resources': source_node_allocations
|
||||
}
|
||||
]
|
||||
'allocations': {
|
||||
dest_node.uuid: {'resources': source_node_allocations}
|
||||
}
|
||||
}
|
||||
# The claim_resources method will check for existing allocations
|
||||
# for the instance and effectively "double up" the allocations for
|
||||
|
@ -385,7 +380,8 @@ def claim_resources_on_destination(
|
|||
# we use the existing resource allocations from the source node.
|
||||
if reportclient.claim_resources(
|
||||
instance.uuid, alloc_request,
|
||||
instance.project_id, instance.user_id):
|
||||
instance.project_id, instance.user_id,
|
||||
allocation_request_version='1.12'):
|
||||
LOG.debug('Instance allocations successfully created on '
|
||||
'destination node %(dest)s: %(alloc_request)s',
|
||||
{'dest': dest_node.uuid,
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import time
|
||||
|
||||
from keystoneauth1 import exceptions as ks_exc
|
||||
|
@ -368,6 +367,51 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
mock.call(expected_url, mock.ANY, version='1.8'),
|
||||
mock.call(expected_url, mock.ANY, version='1.8')])
|
||||
|
||||
def test_claim_resources_success_with_old_version(self):
|
||||
get_resp_mock = mock.Mock(status_code=200)
|
||||
get_resp_mock.json.return_value = {
|
||||
'allocations': {}, # build instance, not move
|
||||
}
|
||||
self.ks_adap_mock.get.return_value = get_resp_mock
|
||||
resp_mock = mock.Mock(status_code=204)
|
||||
self.ks_adap_mock.put.return_value = resp_mock
|
||||
consumer_uuid = uuids.consumer_uuid
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.cn1
|
||||
},
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
}
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
expected_payload = {
|
||||
'allocations': {
|
||||
alloc['resource_provider']['uuid']: {
|
||||
'resources': alloc['resources']
|
||||
}
|
||||
for alloc in alloc_req['allocations']
|
||||
}
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.12', json=expected_payload,
|
||||
raise_exc=False)
|
||||
|
||||
self.assertTrue(res)
|
||||
|
||||
def test_claim_resources_success(self):
|
||||
get_resp_mock = mock.Mock(status_code=200)
|
||||
get_resp_mock.json.return_value = {
|
||||
|
@ -379,12 +423,11 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
consumer_uuid = uuids.consumer_uuid
|
||||
alloc_req = {
|
||||
'allocations': {
|
||||
'resource_provider': {
|
||||
'uuid': uuids.cn1,
|
||||
},
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
uuids.cn1: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -392,14 +435,16 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
expected_payload = copy.deepcopy(alloc_req)
|
||||
expected_payload = {'allocations': {
|
||||
rp_uuid: alloc
|
||||
for rp_uuid, alloc in alloc_req['allocations'].items()}}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.10', json=expected_payload,
|
||||
expected_url, microversion='1.12', json=expected_payload,
|
||||
raise_exc=False)
|
||||
|
||||
self.assertTrue(res)
|
||||
|
@ -428,63 +473,49 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
self.ks_adap_mock.put.return_value = resp_mock
|
||||
consumer_uuid = uuids.consumer_uuid
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.destination,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.destination: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
'MEMORY_MB': 1024
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
# New allocation should include resources claimed on both the source
|
||||
# and destination hosts
|
||||
expected_payload = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.source,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.source: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
'MEMORY_MB': 1024
|
||||
}
|
||||
},
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.destination,
|
||||
},
|
||||
uuids.destination: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
'MEMORY_MB': 1024
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.10', json=mock.ANY,
|
||||
expected_url, microversion='1.12', json=mock.ANY,
|
||||
raise_exc=False)
|
||||
# We have to pull the json body from the mock call_args to validate
|
||||
# it separately otherwise hash seed issues get in the way.
|
||||
actual_payload = self.ks_adap_mock.put.call_args[1]['json']
|
||||
sort_by_uuid = lambda x: x['resource_provider']['uuid']
|
||||
expected_allocations = sorted(expected_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
actual_allocations = sorted(actual_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
self.assertEqual(expected_allocations, actual_allocations)
|
||||
self.assertEqual(expected_payload, actual_payload)
|
||||
|
||||
self.assertTrue(res)
|
||||
|
||||
|
@ -521,81 +552,61 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
self.ks_adap_mock.put.return_value = resp_mock
|
||||
consumer_uuid = uuids.consumer_uuid
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.destination,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.destination: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
}
|
||||
},
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.shared_storage,
|
||||
},
|
||||
uuids.shared_storage: {
|
||||
'resources': {
|
||||
'DISK_GB': 100,
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
}
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
# New allocation should include resources claimed on both the source
|
||||
# and destination hosts but not have a doubled-up request for the disk
|
||||
# resources on the shared provider
|
||||
expected_payload = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.source,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.source: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
'MEMORY_MB': 1024
|
||||
}
|
||||
},
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.shared_storage,
|
||||
},
|
||||
uuids.shared_storage: {
|
||||
'resources': {
|
||||
'DISK_GB': 100,
|
||||
},
|
||||
'DISK_GB': 100
|
||||
}
|
||||
},
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.destination,
|
||||
},
|
||||
uuids.destination: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
'MEMORY_MB': 1024
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.10', json=mock.ANY,
|
||||
expected_url, microversion='1.12', json=mock.ANY,
|
||||
raise_exc=False)
|
||||
# We have to pull the allocations from the json body from the
|
||||
# mock call_args to validate it separately otherwise hash seed
|
||||
# issues get in the way.
|
||||
actual_payload = self.ks_adap_mock.put.call_args[1]['json']
|
||||
sort_by_uuid = lambda x: x['resource_provider']['uuid']
|
||||
expected_allocations = sorted(expected_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
actual_allocations = sorted(actual_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
self.assertEqual(expected_allocations, actual_allocations)
|
||||
self.assertEqual(expected_payload, actual_payload)
|
||||
|
||||
self.assertTrue(res)
|
||||
|
||||
|
@ -629,57 +640,46 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
# resource class in the new allocation to make sure it's not lost and
|
||||
# that we don't have a KeyError when merging the allocations.
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.same_host,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.same_host: {
|
||||
'resources': {
|
||||
'VCPU': 2,
|
||||
'MEMORY_MB': 2048,
|
||||
'DISK_GB': 40,
|
||||
'CUSTOM_FOO': 1
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
# New allocation should include doubled resources claimed on the same
|
||||
# host.
|
||||
expected_payload = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.same_host,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.same_host: {
|
||||
'resources': {
|
||||
'VCPU': 3,
|
||||
'MEMORY_MB': 3072,
|
||||
'DISK_GB': 60,
|
||||
'CUSTOM_FOO': 1
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.10', json=mock.ANY, raise_exc=False)
|
||||
expected_url, microversion='1.12', json=mock.ANY, raise_exc=False)
|
||||
# We have to pull the json body from the mock call_args to validate
|
||||
# it separately otherwise hash seed issues get in the way.
|
||||
actual_payload = self.ks_adap_mock.put.call_args[1]['json']
|
||||
sort_by_uuid = lambda x: x['resource_provider']['uuid']
|
||||
expected_allocations = sorted(expected_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
actual_allocations = sorted(actual_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
self.assertEqual(expected_allocations, actual_allocations)
|
||||
self.assertEqual(expected_payload, actual_payload)
|
||||
|
||||
self.assertTrue(res)
|
||||
|
||||
|
@ -718,69 +718,52 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
# This is the resize-up allocation where VCPU, MEMORY_MB and DISK_GB
|
||||
# are all being increased but DISK_GB is on a shared storage provider.
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.same_host,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.same_host: {
|
||||
'resources': {
|
||||
'VCPU': 2,
|
||||
'MEMORY_MB': 2048
|
||||
},
|
||||
}
|
||||
},
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.shared_storage,
|
||||
},
|
||||
uuids.shared_storage: {
|
||||
'resources': {
|
||||
'DISK_GB': 40,
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
# New allocation should include doubled resources claimed on the same
|
||||
# host.
|
||||
expected_payload = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.same_host,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.same_host: {
|
||||
'resources': {
|
||||
'VCPU': 3,
|
||||
'MEMORY_MB': 3072
|
||||
},
|
||||
}
|
||||
},
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.shared_storage,
|
||||
},
|
||||
uuids.shared_storage: {
|
||||
'resources': {
|
||||
'DISK_GB': 60,
|
||||
},
|
||||
'DISK_GB': 60
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.10', json=mock.ANY, raise_exc=False)
|
||||
expected_url, microversion='1.12', json=mock.ANY, raise_exc=False)
|
||||
# We have to pull the json body from the mock call_args to validate
|
||||
# it separately otherwise hash seed issues get in the way.
|
||||
actual_payload = self.ks_adap_mock.put.call_args[1]['json']
|
||||
sort_by_uuid = lambda x: x['resource_provider']['uuid']
|
||||
expected_allocations = sorted(expected_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
actual_allocations = sorted(actual_payload['allocations'],
|
||||
key=sort_by_uuid)
|
||||
self.assertEqual(expected_allocations, actual_allocations)
|
||||
self.assertEqual(expected_payload, actual_payload)
|
||||
|
||||
self.assertTrue(res)
|
||||
|
||||
|
@ -801,32 +784,33 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
self.ks_adap_mock.put.side_effect = resp_mocks
|
||||
consumer_uuid = uuids.consumer_uuid
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.cn1,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.cn1: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
expected_payload = copy.deepcopy(alloc_req)
|
||||
expected_payload = {
|
||||
'allocations':
|
||||
{rp_uuid: res
|
||||
for rp_uuid, res in alloc_req['allocations'].items()}
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
# We should have exactly two calls to the placement API that look
|
||||
# identical since we're retrying the same HTTP request
|
||||
expected_calls = [
|
||||
mock.call(expected_url, microversion='1.10', json=expected_payload,
|
||||
mock.call(expected_url, microversion='1.12', json=expected_payload,
|
||||
raise_exc=False)] * 2
|
||||
self.assertEqual(len(expected_calls),
|
||||
self.ks_adap_mock.put.call_count)
|
||||
|
@ -845,30 +829,31 @@ class TestPutAllocations(SchedulerReportClientTestCase):
|
|||
self.ks_adap_mock.put.return_value = resp_mock
|
||||
consumer_uuid = uuids.consumer_uuid
|
||||
alloc_req = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.cn1,
|
||||
},
|
||||
'allocations': {
|
||||
uuids.cn1: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 1024,
|
||||
},
|
||||
}
|
||||
},
|
||||
],
|
||||
},
|
||||
}
|
||||
|
||||
project_id = uuids.project_id
|
||||
user_id = uuids.user_id
|
||||
res = self.client.claim_resources(consumer_uuid, alloc_req, project_id,
|
||||
user_id)
|
||||
user_id, allocation_request_version='1.12')
|
||||
|
||||
expected_url = "/allocations/%s" % consumer_uuid
|
||||
expected_payload = copy.deepcopy(alloc_req)
|
||||
expected_payload = {
|
||||
'allocations':
|
||||
{rp_uuid: res
|
||||
for rp_uuid, res in alloc_req['allocations'].items()}
|
||||
}
|
||||
expected_payload['project_id'] = project_id
|
||||
expected_payload['user_id'] = user_id
|
||||
self.ks_adap_mock.put.assert_called_once_with(
|
||||
expected_url, microversion='1.10', json=expected_payload,
|
||||
expected_url, microversion='1.12', json=expected_payload,
|
||||
raise_exc=False)
|
||||
|
||||
self.assertFalse(res)
|
||||
|
@ -1434,7 +1419,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
|||
expected_url = '/allocation_candidates?%s' % parse.urlencode(
|
||||
{'resources': 'MEMORY_MB:1024,VCPU:1'})
|
||||
self.ks_adap_mock.get.assert_called_once_with(
|
||||
expected_url, raise_exc=False, microversion='1.10')
|
||||
expected_url, raise_exc=False, microversion='1.12')
|
||||
self.assertEqual(mock.sentinel.alloc_reqs, alloc_reqs)
|
||||
self.assertEqual(mock.sentinel.p_sums, p_sums)
|
||||
|
||||
|
@ -1451,7 +1436,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
|
|||
|
||||
expected_url = '/allocation_candidates?resources=MEMORY_MB%3A1024'
|
||||
self.ks_adap_mock.get.assert_called_once_with(
|
||||
expected_url, raise_exc=False, microversion='1.10')
|
||||
expected_url, raise_exc=False, microversion='1.12')
|
||||
self.assertIsNone(res[0])
|
||||
|
||||
def test_get_resource_provider_found(self):
|
||||
|
|
|
@ -129,18 +129,15 @@ COMPUTE_NODES = [
|
|||
|
||||
ALLOC_REQS = [
|
||||
{
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': cn.uuid,
|
||||
},
|
||||
'allocations': {
|
||||
cn.uuid: {
|
||||
'resources': {
|
||||
'VCPU': 1,
|
||||
'MEMORY_MB': 512,
|
||||
'DISK_GB': 512,
|
||||
},
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
} for cn in COMPUTE_NODES
|
||||
]
|
||||
|
||||
|
|
|
@ -483,14 +483,11 @@ class TestUtils(test.NoDBTestCase):
|
|||
'DISK_GB': instance.root_gb
|
||||
}
|
||||
dest_alloc_request = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.dest_node
|
||||
},
|
||||
'allocations': {
|
||||
uuids.dest_node: {
|
||||
'resources': source_res_allocs
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@mock.patch.object(reportclient,
|
||||
|
@ -508,7 +505,8 @@ class TestUtils(test.NoDBTestCase):
|
|||
uuids.source_node, instance.uuid)
|
||||
mock_claim.assert_called_once_with(
|
||||
instance.uuid, dest_alloc_request,
|
||||
instance.project_id, instance.user_id)
|
||||
instance.project_id, instance.user_id,
|
||||
allocation_request_version='1.12')
|
||||
|
||||
test()
|
||||
|
||||
|
@ -527,14 +525,11 @@ class TestUtils(test.NoDBTestCase):
|
|||
'DISK_GB': instance.root_gb
|
||||
}
|
||||
dest_alloc_request = {
|
||||
'allocations': [
|
||||
{
|
||||
'resource_provider': {
|
||||
'uuid': uuids.dest_node
|
||||
},
|
||||
'allocations': {
|
||||
uuids.dest_node: {
|
||||
'resources': source_res_allocs
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@mock.patch.object(reportclient,
|
||||
|
@ -548,7 +543,8 @@ class TestUtils(test.NoDBTestCase):
|
|||
self.assertFalse(mock_get_allocs.called)
|
||||
mock_claim.assert_called_once_with(
|
||||
instance.uuid, dest_alloc_request,
|
||||
instance.project_id, instance.user_id)
|
||||
instance.project_id, instance.user_id,
|
||||
allocation_request_version='1.12')
|
||||
|
||||
test()
|
||||
|
||||
|
|
Loading…
Reference in New Issue