Fix nits in update_provider_tree series
Addresses the following minor/nonblocking review comments from earlier in
this series:

https://review.openstack.org/#/c/521686/9/nova/scheduler/client/report.py@266
https://review.openstack.org/#/c/521686/9/nova/scheduler/client/report.py@717
https://review.openstack.org/#/c/523932/3/nova/compute/provider_tree.py@159
https://review.openstack.org/#/c/520756/30/nova/compute/provider_tree.py@255
https://review.openstack.org/#/c/521187/29/nova/virt/driver.py@831
https://review.openstack.org/#/c/533808/6/nova/tests/unit/scheduler/client/test_report.py@1882
https://review.openstack.org/#/c/526540/18/nova/tests/unit/scheduler/client/test_report.py@1844
https://review.openstack.org/#/c/536624/3/nova/tests/unit/compute/test_provider_tree.py@325
https://review.openstack.org/#/c/537648/7/nova/scheduler/client/report.py@1164

Change-Id: I416a5a00be844f8995b5521499511daf9aaace4e
blueprint: update-provider-tree
@@ -249,7 +249,9 @@ class ProviderTree(object):
 providers that do not exist in the tree already and will REPLACE
 providers in the tree if provider_dicts contains providers that are
 already in the tree. This method will NOT remove providers from the
-tree that are not in provider_dicts.
+tree that are not in provider_dicts. But if a parent provider is in
+provider_dicts and the descendants are not, this method will remove the
+descendants from the tree.

 :param provider_dicts: An iterable of dicts of resource provider
 information. If a provider is present in
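
The behavior described in the docstring above can be illustrated with a minimal sketch (illustration only: placeholder UUID strings, and it assumes provider dicts carry the uuid/name/generation/parent_provider_uuid keys that placement returns):

    from nova.compute import provider_tree

    pt = provider_tree.ProviderTree()
    # First pass: a root provider with one child.
    pt.populate_from_iterable([
        {'uuid': 'root-uuid', 'name': 'root', 'generation': 1,
         'parent_provider_uuid': None},
        {'uuid': 'child-uuid', 'name': 'child', 'generation': 1,
         'parent_provider_uuid': 'root-uuid'},
    ])
    # Second pass: the root is present but its child is not, so the child (a
    # descendant of a provider in provider_dicts) is removed from the tree.
    pt.populate_from_iterable([
        {'uuid': 'root-uuid', 'name': 'root', 'generation': 2,
         'parent_provider_uuid': None},
    ])
    assert pt.exists('root-uuid')
    assert not pt.exists('child-uuid')
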
@@ -355,7 +357,7 @@ class ProviderTree(object):
 exists = False

 if exists:
-err = _("Provider %s already exists as a root.")
+err = _("Provider %s already exists.")
 raise ValueError(err % uuid)

 p = _Provider(name, uuid=uuid, generation=generation)
@@ -402,9 +404,19 @@ class ProviderTree(object):
 :param generation: Generation to set for the new child provider
 :returns: the UUID of the new provider

-:raises ValueError if parent_uuid points to a non-existing provider.
+:raises ValueError if a provider with the specified uuid or name
+already exists; or if parent_uuid points to a nonexistent
+provider.
 """
 with self.lock:
+try:
+self._find_with_lock(uuid or name)
+except ValueError:
+pass
+else:
+err = _("Provider %s already exists.")
+raise ValueError(err % (uuid or name))
+
 parent_node = self._find_with_lock(parent)
 p = _Provider(name, uuid, generation, parent_node.uuid)
 parent_node.add_child(p)
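
For context, the net effect of the added lookup is that new_child now fails fast when the requested name or UUID is already in the tree, whether it belongs to a root or to another child. A minimal sketch (placeholder UUID string, illustration only):

    from nova.compute import provider_tree

    pt = provider_tree.ProviderTree()
    pt.new_root('cn1', 'cn1-uuid', 0)
    pt.new_child('numa_cell0', 'cn1-uuid')
    try:
        # 'numa_cell0' is already in the tree (matched by name), so the new
        # duplicate check raises before any child is created.
        pt.new_child('numa_cell0', 'cn1-uuid')
    except ValueError:
        pass
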
@@ -258,7 +258,7 @@ class SchedulerReportClient(object):
 # provider and inventory information
 self._provider_tree = provider_tree.ProviderTree()
 # Track the last time we updated providers' aggregates and traits
-self.association_refresh_time = {}
+self._association_refresh_time = {}
 self._client = self._create_client()
 # NOTE(danms): Keep track of how naggy we've been
 self._warn_count = 0
@@ -268,7 +268,7 @@ class SchedulerReportClient(object):
 """Create the HTTP session accessing the placement service."""
 # Flush provider tree and associations so we start from a clean slate.
 self._provider_tree = provider_tree.ProviderTree()
-self.association_refresh_time = {}
+self._association_refresh_time = {}
 client = utils.get_ksa_adapter('placement')
 # Set accept header on every request to ensure we notify placement
 # service of our response body media type preferences.
@@ -756,7 +756,7 @@ class SchedulerReportClient(object):
 self._provider_tree.remove(rp_uuid)
 except ValueError:
 pass
-self.association_refresh_time.pop(rp_uuid, None)
+self._association_refresh_time.pop(rp_uuid, None)
 return

 msg = ("[%(placement_req_id)s] Failed to delete resource provider "
@@ -870,16 +870,16 @@ class SchedulerReportClient(object):
 self._refresh_associations(context, rp['uuid'],
 force=force,
 refresh_sharing=False)
-self.association_refresh_time[rp_uuid] = time.time()
+self._association_refresh_time[rp_uuid] = time.time()

 def _associations_stale(self, uuid):
 """Respond True if aggregates and traits have not been refreshed
 "recently".

-It is old if association_refresh_time for this uuid is not set
-or more than ASSOCIATION_REFRESH seconds ago.
+Associations are stale if association_refresh_time for this uuid is not
+set or is more than ASSOCIATION_REFRESH seconds ago.
 """
-refresh_time = self.association_refresh_time.get(uuid, 0)
+refresh_time = self._association_refresh_time.get(uuid, 0)
 return (time.time() - refresh_time) > ASSOCIATION_REFRESH

 def _update_inventory_attempt(self, context, rp_uuid, inv_data):
@@ -945,9 +945,8 @@ class SchedulerReportClient(object):
 # trigger a delete of the old allocation records and then set
 # the new inventory, and then set the allocation record to the
 # new CUSTOM_IRON_SILVER record.
-match = _RE_INV_IN_USE.search(result.text)
-if match:
-rc = match.group(1)
+rc = _extract_inventory_in_use(result.text)
+if rc is not None:
 raise exception.InventoryInUse(
 resource_classes=rc,
 resource_provider=rp_uuid,
@@ -1172,9 +1171,8 @@ class SchedulerReportClient(object):
 if resp.status_code == 409:
 # If a conflict attempting to remove inventory in a resource class
 # with active allocations, raise InventoryInUse
-match = _RE_INV_IN_USE.search(resp.text)
-if match:
-rc = match.group(1)
+rc = _extract_inventory_in_use(resp.text)
+if rc is not None:
 raise exception.InventoryInUse(
 resource_classes=rc,
 resource_provider=rp_uuid,
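
Both hunks above replace the open-coded regex handling with a shared _extract_inventory_in_use() helper. Its definition is not shown in these hunks; a plausible shape, assuming it reuses the existing _RE_INV_IN_USE pattern and returns None when nothing matches, is:

    def _extract_inventory_in_use(body):
        # Pull the resource class name(s) out of placement's "inventory in
        # use" error body; return None if the message doesn't match.
        match = _RE_INV_IN_USE.search(body)
        if match:
            return match.group(1)
        return None
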
@@ -1450,7 +1448,7 @@ class SchedulerReportClient(object):
 self._provider_tree.remove(rp_uuid)
 except ValueError:
 pass
-self.association_refresh_time.pop(rp_uuid, None)
+self._association_refresh_time.pop(rp_uuid, None)

 # Overall indicator of success. Will be set to False on any exception.
 success = True
@@ -92,6 +92,12 @@ class TestProviderTree(test.NoDBTestCase):
 uuids.non_existing_rp,
 )

+# Fail attempting to add a child that already exists in the tree
+# Existing provider is a child; search by name
+self.assertRaises(ValueError, pt.new_child, 'numa_cell0', cn1.uuid)
+# Existing provider is a root; search by UUID
+self.assertRaises(ValueError, pt.new_child, cn1.uuid, cn2.uuid)
+
 # Test data().
 # Root, by UUID
 cn1_snap = pt.data(cn1.uuid)
@@ -325,12 +331,12 @@ class TestProviderTree(test.NoDBTestCase):
 # always comes after its parent (and by extension, its ancestors too).
 puuids = pt.get_provider_uuids()
 for desc in (uuids.child1, uuids.child2):
-self.assertTrue(puuids.index(desc) > puuids.index(uuids.root))
+self.assertGreater(puuids.index(desc), puuids.index(uuids.root))
 for desc in (uuids.grandchild1_1, uuids.grandchild1_2):
-self.assertTrue(puuids.index(desc) > puuids.index(uuids.child1))
+self.assertGreater(puuids.index(desc), puuids.index(uuids.child1))
 for desc in (uuids.ggc1_2_1, uuids.ggc1_2_2, uuids.ggc1_2_3):
-self.assertTrue(
-puuids.index(desc) > puuids.index(uuids.grandchild1_2))
+self.assertGreater(
+puuids.index(desc), puuids.index(uuids.grandchild1_2))

 def test_populate_from_iterable_with_root_update(self):
 # Ensure we can update hierarchies, including adding children, in a
@@ -1924,7 +1924,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
 delete_mock.status_code = status_code
 # Seed the caches
 self.client._provider_tree.new_root('compute', uuids.root, 0)
-self.client.association_refresh_time[uuids.root] = 1234
+self.client._association_refresh_time[uuids.root] = 1234

 self.client._delete_provider(uuids.root, global_request_id='gri')
@@ -1933,7 +1933,7 @@ class TestProviderOperations(SchedulerReportClientTestCase):
 headers={'X-Openstack-Request-Id': 'gri'}, microversion=None,
 raise_exc=False)
 self.assertFalse(self.client._provider_tree.exists(uuids.root))
-self.assertNotIn(uuids.root, self.client.association_refresh_time)
+self.assertNotIn(uuids.root, self.client._association_refresh_time)

 self.ks_adap_mock.delete.reset_mock()
@@ -1977,10 +1977,15 @@ class TestProviderOperations(SchedulerReportClientTestCase):

 def test_set_aggregates_for_provider_fail(self):
 self.ks_adap_mock.put.return_value = mock.Mock(status_code=503)
+# Prime the provider tree cache
+self.client._provider_tree.new_root('rp', uuids.rp, 0)
 self.assertRaises(
 exception.ResourceProviderUpdateFailed,
 self.client.set_aggregates_for_provider,
-self.context, uuids.rp, [])
+self.context, uuids.rp, [uuids.agg])
+# The cache wasn't updated
+self.assertEqual(set(),
+self.client._provider_tree.data(uuids.rp).aggregates)


 class TestAggregates(SchedulerReportClientTestCase):
@@ -2024,7 +2029,7 @@ class TestAggregates(SchedulerReportClientTestCase):
 headers={'X-Openstack-Request-Id': self.context.global_id})
 self.assertTrue(log_mock.called)
 self.assertEqual(uuids.request_id,
-log_mock.call_args[0][1]['placement_req_id'])
+log_mock.call_args[0][1]['placement_req_id'])
 self.ks_adap_mock.get.reset_mock()
 log_mock.reset_mock()
@@ -2074,7 +2079,7 @@ class TestTraits(SchedulerReportClientTestCase):
 **self.trait_api_kwargs)
 self.assertTrue(log_mock.called)
 self.assertEqual(uuids.request_id,
-log_mock.call_args[0][1]['placement_req_id'])
+log_mock.call_args[0][1]['placement_req_id'])
 self.ks_adap_mock.get.reset_mock()
 log_mock.reset_mock()
@@ -2247,7 +2252,7 @@ class TestAssociations(SchedulerReportClientTestCase):
 mock_trait_get.assert_called_once_with(self.context, uuid)
 mock_shr_get.assert_called_once_with(
 self.context, mock_agg_get.return_value)
-self.assertIn(uuid, self.client.association_refresh_time)
+self.assertIn(uuid, self.client._association_refresh_time)
 self.assertTrue(
 self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
 self.assertFalse(
@@ -2277,7 +2282,7 @@ class TestAssociations(SchedulerReportClientTestCase):
 mock_agg_get.assert_called_once_with(self.context, uuid)
 mock_trait_get.assert_called_once_with(self.context, uuid)
 mock_shr_get.assert_not_called()
-self.assertIn(uuid, self.client.association_refresh_time)
+self.assertIn(uuid, self.client._association_refresh_time)
 self.assertTrue(
 self.client._provider_tree.in_aggregates(uuid, [uuids.agg1]))
 self.assertFalse(
@@ -2306,7 +2311,7 @@ class TestAssociations(SchedulerReportClientTestCase):
 mock_agg_get.assert_not_called()
 mock_trait_get.assert_not_called()
 mock_shr_get.assert_not_called()
-self.assertFalse(self.client.association_refresh_time)
+self.assertFalse(self.client._association_refresh_time)

 @mock.patch.object(report.LOG, 'debug')
 @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -2337,7 +2342,7 @@ class TestAssociations(SchedulerReportClientTestCase):
 mock.call('Refreshing trait associations for resource '
 'provider %s, traits: %s', uuid, 'None')
 ])
-self.assertIn(uuid, self.client.association_refresh_time)
+self.assertIn(uuid, self.client._association_refresh_time)

 # Clear call count.
 mock_agg_get.reset_mock()
@@ -3325,7 +3330,7 @@ class TestAllocations(SchedulerReportClientTestCase):
 def test_delete_resource_provider_no_cascade(self, mock_by_host,
 mock_del_alloc, mock_delete):
 self.client._provider_tree.new_root(uuids.cn, uuids.cn, 1)
-self.client.association_refresh_time[uuids.cn] = mock.Mock()
+self.client._association_refresh_time[uuids.cn] = mock.Mock()
 cn = objects.ComputeNode(uuid=uuids.cn, host="fake_host",
 hypervisor_hostname="fake_hostname", )
 inst1 = objects.Instance(uuid=uuids.inst1)
@@ -3339,7 +3344,7 @@ class TestAllocations(SchedulerReportClientTestCase):
 exp_url = "/resource_providers/%s" % uuids.cn
 mock_delete.assert_called_once_with(
 exp_url, global_request_id=self.context.global_id)
-self.assertNotIn(uuids.cn, self.client.association_refresh_time)
+self.assertNotIn(uuids.cn, self.client._association_refresh_time)

 @mock.patch("nova.scheduler.client.report.SchedulerReportClient."
 "delete")
@@ -862,7 +862,6 @@ class ComputeDriver(object):
 an advisory capacity to restrict changes to only the providers
 associated with that one node, but this is not a requirement: the
 caller always subsumes all changes regardless.
-:return: True if the provider_tree was changed; False otherwise.
 """
 raise NotImplementedError()
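
Dropping the ":return:" line reflects how the interface is used: a driver edits the passed-in ProviderTree in place and the caller subsumes whatever changed, so there is no return value to document. A minimal sketch of a driver override under that contract (hypothetical child provider name; the exact signature may differ between revisions of this series):

    def update_provider_tree(self, provider_tree, nodename):
        # Mutate the tree in place; nothing is returned to the caller.
        if not provider_tree.exists('numa_cell0'):
            provider_tree.new_child('numa_cell0', nodename)
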