Remove extensible resource tracking

The extensible resource tracker plugins were deprecated in
Ie4b6670a828d99aebfd348bd700f9f58b3e39fdb - it is now time
to remove the feature. Providers will no longer be able
to extend the resource types they want to track.

The compute node stats field is now updated every time
stats change (as it originally was before ERT). Some tests
have had stats added to the expected field in assertions
to account for this information.

Change-Id: Ia34ca9160ca34bcfa3ed56395e1b68ca802c0c51
This commit is contained in:
Paul Murray 2016-03-31 11:39:33 +01:00
parent c368a3e802
commit 49d9433c62
11 changed files with 55 additions and 584 deletions

View File

@ -150,7 +150,6 @@ class Claim(NopClaim):
self._test_vcpus(resources, vcpus_limit),
self._test_numa_topology(resources, numa_topology_limit),
self._test_pci()]
reasons = reasons + self._test_ext_resources(limits)
reasons = [r for r in reasons if r is not None]
if len(reasons) > 0:
raise exception.ComputeResourcesUnavailable(reason=
@ -194,10 +193,6 @@ class Claim(NopClaim):
if not stats.support_requests(pci_requests.requests):
return _('Claim pci failed.')
def _test_ext_resources(self, limits):
return self.tracker.ext_resources_handler.test_resources(
self.instance, limits)
def _test_numa_topology(self, resources, limit):
host_topology = (resources.numa_topology
if 'numa_topology' in resources else None)
@ -304,10 +299,6 @@ class MoveClaim(Claim):
if not claim:
return _('Claim pci failed.')
def _test_ext_resources(self, limits):
return self.tracker.ext_resources_handler.test_resources(
self.instance_type, limits)
def abort(self):
"""Compute operation requiring claimed resources has failed or
been aborted.

View File

@ -26,7 +26,6 @@ from oslo_utils import importutils
from nova.compute import claims
from nova.compute import monitors
from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
import nova.conf
@ -85,8 +84,6 @@ class ResourceTracker(object):
self.tracked_migrations = {}
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.ext_resources_handler = \
ext_resources.ResourceHandler(CONF.compute_resources)
self.old_resources = objects.ComputeNode()
self.scheduler_client = scheduler_client.SchedulerClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
@ -390,6 +387,7 @@ class ResourceTracker(object):
# purge old stats and init with anything passed in by the driver
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
self.compute_node.stats = copy.deepcopy(self.stats)
# update the allocation ratios for the related ComputeNode object
self.compute_node.ram_allocation_ratio = self.ram_allocation_ratio
@ -546,10 +544,6 @@ class ResourceTracker(object):
LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
{'host': self.host, 'node': self.nodename})
def _write_ext_resources(self, resources):
resources.stats = copy.deepcopy(self.stats)
self.ext_resources_handler.write_resources(resources)
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
@ -637,7 +631,6 @@ class ResourceTracker(object):
def _update(self, context):
"""Update partial stats locally and populate them to Scheduler."""
self._write_ext_resources(self.compute_node)
if not self._resource_change():
return
# Persist the stats to the Scheduler
@ -663,7 +656,6 @@ class ResourceTracker(object):
self.compute_node.local_gb_used)
self.compute_node.running_vms = self.stats.num_instances
self.ext_resources_handler.update_from_instance(usage, sign)
# Calculate the numa usage
free = sign == -1
@ -820,6 +812,7 @@ class ResourceTracker(object):
sign = -1
self.stats.update_stats_for_instance(instance, is_removed_instance)
self.compute_node.stats = copy.deepcopy(self.stats)
# if it's a new or deleted instance:
if is_new_instance or is_removed_instance:
@ -856,10 +849,6 @@ class ResourceTracker(object):
self.compute_node.current_workload = 0
self.compute_node.running_vms = 0
# Reset values for extended resources
self.ext_resources_handler.reset_resources(self.compute_node,
self.driver)
for instance in instances:
if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
self._update_usage_from_instance(context, instance)

View File

@ -1,142 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import stevedore
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
RESOURCE_NAMESPACE = 'nova.compute.resources'
class ResourceHandler(object):
    """Load and drive the extensible resource tracker plugins.

    Wraps a stevedore NamedExtensionManager and fans each operation out to
    every loaded plugin via ``map_method``.  All public methods are no-ops
    when no plugins were loaded.
    """

    def __init__(self, names, propagate_map_exceptions=False):
        """Load the named plugins from the nova.compute.resources namespace.

        Exceptions raised inside plugins are either propagated (useful in
        tests, so failures surface as part of the test) or handled and
        logged at error level, depending on propagate_map_exceptions.
        Requested plugins that could not be located are logged.

        :param names: the list of plugins to load by name
        :param propagate_map_exceptions: True indicates exceptions in the
        plugins should be raised, False indicates they should be handled and
        logged.
        """
        self._mgr = stevedore.NamedExtensionManager(
            namespace=RESOURCE_NAMESPACE,
            names=names,
            propagate_map_exceptions=propagate_map_exceptions,
            invoke_on_load=True)
        # Anything actually loaded means the deprecated feature is in use;
        # warn the operator about its upcoming removal.
        if self._mgr.names():
            LOG.warning(_LW(
                'The Extensible Resource Tracker is deprecated and will '
                'be removed in the 14.0.0 release. If you '
                'use this functionality and have custom resources that '
                'are managed by the Extensible Resource Tracker, please '
                'contact the Nova development team by posting to the '
                'openstack-dev mailing list. There is no future planned '
                'support for the tracking of custom resources.'))
        self._log_missing_plugins(names)

    def _log_missing_plugins(self, names):
        # A name that was requested but is absent from the manager failed
        # to load; surface each one at warning level.
        loaded = self._mgr.names()
        for name in names:
            if name not in loaded:
                LOG.warning(_LW('Compute resource plugin %s was not loaded'),
                            name)

    def reset_resources(self, resources, driver):
        """Reset every plugin to its initial state.

        The resources data is the initial view gathered from the
        hypervisor; the driver is passed along in case a plugin needs
        extra information from it (e.g. memory overhead).

        :param resources: the resources reported by the hypervisor
        :param driver: the driver for the hypervisor
        :returns: None
        """
        if not self._mgr.extensions:
            return
        self._mgr.map_method('reset', resources, driver)

    def test_resources(self, usage, limits):
        """Ask each plugin whether it can support the given instance.

        Each plugin returns None if it has sufficient resource available,
        or a human readable string explaining why it does not.

        :param usage: the additional resource usage
        :param limits: limits used for the calculation
        :returns: a list of return values from the plugins
        """
        if self._mgr.extensions:
            return self._mgr.map_method('test', usage, limits)
        return []

    def update_from_instance(self, usage, sign=1):
        """Apply (or remove) an instance's usage to each plugin.

        :param usage: the resource usage of the instance
        :param sign: 1 to add the instance to current usage, -1 to
            remove it
        :returns: None
        """
        if not self._mgr.extensions:
            return
        method = 'add_instance' if sign == 1 else 'remove_instance'
        self._mgr.map_method(method, usage)

    def write_resources(self, resources):
        """Have each plugin write its resource data into resources.

        :param resources: the compute node resources
        :returns: None
        """
        if not self._mgr.extensions:
            return
        self._mgr.map_method('write', resources)

    def report_free_resources(self):
        """Have each plugin log its free resource information.

        :returns: None
        """
        if self._mgr.extensions:
            self._mgr.map_method('report_free')

View File

@ -1,93 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Resource(object):
    """Abstract interface for compute resource plugins.

    Plugins are instantiated by calling __init__() with no arguments when
    they are loaded.  Subclassing this base is optional, but every compute
    resource plugin must provide the abstract methods defined here.
    """

    @abc.abstractmethod
    def reset(self, resources, driver):
        """Return the resource to its initial state.

        The initial state is derived from the resource view discovered
        from the hypervisor.
        """

    @abc.abstractmethod
    def test(self, usage, limits):
        """Check for sufficient resource to allocate for an instance.

        :param usage: the resource usage of the instances
        :param limits: limits to apply
        :returns: None if the test passes or a string describing the reason
                  why the test failed
        """

    @abc.abstractmethod
    def add_instance(self, usage):
        """Add an instance's allocation to the resource information.

        :param usage: the resource usage of the instance being added
        :returns: None
        """

    @abc.abstractmethod
    def remove_instance(self, usage):
        """Remove an instance's allocation from the resource information.

        :param usage: the resource usage of the instance being removed
        :returns: None
        """

    @abc.abstractmethod
    def write(self, resources):
        """Write resource data to populate resources.

        :param resources: the resources data to be populated
        :returns: None
        """

    @abc.abstractmethod
    def report_free(self):
        """Log how much free resource this plugin holds.

        :returns: None
        """

View File

@ -88,18 +88,6 @@ resource_tracker_opts = [
help='DEPRECATED: Class that will manage stats for the '
'local compute host',
deprecated_for_removal=True),
cfg.ListOpt('compute_resources',
default=[],
help='DEPRECATED:The names of the extra resources to track. '
'The Extensible Resource Tracker is deprecated and will '
'be removed in the 14.0.0 release. If you '
'use this functionality and have custom resources that '
'are managed by the Extensible Resource Tracker, please '
'contact the Nova development team by posting to the '
'openstack-dev mailing list. There is no future planned '
'support for the tracking of custom resources.',
deprecated_for_removal=True),
]
allocation_ratio_opts = [

View File

@ -20,4 +20,4 @@ class FakeResourceTracker(resource_tracker.ResourceTracker):
"""Version without a DB requirement."""
def _update(self, context):
self._write_ext_resources(self.compute_node)
pass

View File

@ -42,7 +42,6 @@ class FakeResourceHandler(object):
class DummyTracker(object):
icalled = False
rcalled = False
ext_resources_handler = FakeResourceHandler()
def __init__(self):
self.new_pci_tracker()
@ -227,11 +226,6 @@ class ClaimTestCase(test.NoDBTestCase):
self._claim()
self.assertFalse(mock_supports.called)
def test_ext_resources(self):
self._claim()
self.assertTrue(self.tracker.ext_resources_handler.test_called)
self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
def test_numa_topology_no_limit(self):
huge_instance = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
@ -395,11 +389,6 @@ class MoveClaimTestCase(ClaimTestCase):
overhead=overhead, limits=limits)
return get_claim()
def test_ext_resources(self):
self._claim()
self.assertTrue(self.tracker.ext_resources_handler.test_called)
self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.rcalled)

View File

@ -27,7 +27,6 @@ import six
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
@ -64,7 +63,6 @@ FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
@ -451,8 +449,6 @@ class BaseTestCase(test.TestCase):
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
@ -812,32 +808,6 @@ class TrackerPciStatsTestCase(BaseTrackerTestCase):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources.stats['resA'] = '123'
resources.stats['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024

View File

@ -1,271 +0,0 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the compute extra resources framework."""
from oslo_config import cfg
from stevedore import extension
from stevedore import named
from nova.compute import resources
from nova.compute.resources import base
from nova import context
from nova.objects import flavor as flavor_obj
from nova import test
CONF = cfg.CONF
class FakeResourceHandler(resources.ResourceHandler):
    """ResourceHandler that bypasses stevedore plugin discovery.

    Instead of loading plugins by name, it installs a pre-built test
    extension manager supplied by the caller.
    """

    def __init__(self, extensions):
        # Deliberately skip the parent __init__ (and its
        # NamedExtensionManager construction / deprecation warning).
        self._mgr = named.NamedExtensionManager.make_test_instance(
            extensions)
class FakeResource(base.Resource):
    """Concrete test resource tracking a simple total/used counter.

    Subclasses set ``self.resource_name`` in reset(); requests are read
    from a flavor's extra_specs under that key.
    """

    def __init__(self):
        self.total_res = 0
        self.used_res = 0

    def _get_requested(self, usage):
        # None when the usage carries no request for this resource.
        if 'extra_specs' in usage and \
                self.resource_name in usage['extra_specs']:
            return int(usage['extra_specs'][self.resource_name])
        return None

    def _get_limit(self, limits):
        # None when no limit is defined for this resource.
        if self.resource_name in limits:
            return int(limits[self.resource_name])
        return None

    def reset(self, resources, driver):
        self.total_res = 0
        self.used_res = 0

    def test(self, usage, limits):
        requested = self._get_requested(usage)
        if not requested:
            return None
        limit = self._get_limit(limits)
        if not limit:
            return None
        free = limit - self.used_res
        if requested > free:
            return ('Free %(free)d < requested %(requested)d ' %
                    {'free': free, 'requested': requested})
        return None

    def add_instance(self, usage):
        delta = self._get_requested(usage)
        if delta:
            self.used_res += delta

    def remove_instance(self, usage):
        delta = self._get_requested(usage)
        if delta:
            self.used_res -= delta

    def write(self, resources):
        pass

    def report_free(self):
        free = self.total_res - self.used_res
        return "Free %s" % free
class ResourceA(FakeResource):
    """Fake resource whose capacity comes from the resA config option."""

    def reset(self, resources, driver):
        # ResourceA uses a configuration option
        self.resource_name = 'resource:resA'
        self.used_res = 0
        self.total_res = int(CONF.resA)

    def write(self, resources):
        resources['resA'] = self.total_res
        resources['used_resA'] = self.used_res
class ResourceB(FakeResource):
    """Fake resource whose capacity comes from the driver's resources."""

    def reset(self, resources, driver):
        # ResourceB uses resource details passed in parameter resources
        self.resource_name = 'resource:resB'
        self.used_res = 0
        self.total_res = resources['resB']

    def write(self, resources):
        resources['resB'] = self.total_res
        resources['used_resB'] = self.used_res
def fake_flavor_obj(**updates):
    """Build a Flavor populated with fixed test defaults.

    :param updates: field values that override the defaults
    :returns: a Flavor object
    """
    defaults = {
        'id': 1,
        'name': 'fakeflavor',
        'memory_mb': 8000,
        'vcpus': 3,
        'root_gb': 11,
        'ephemeral_gb': 4,
        'swap': 0,
        'rxtx_factor': 1.0,
        'vcpu_weight': 1,
    }
    flavor = flavor_obj.Flavor()
    for field, value in defaults.items():
        setattr(flavor, field, value)
    if updates:
        flavor.update(updates)
    return flavor
class BaseTestCase(test.NoDBTestCase):
    """Tests for ResourceHandler driving the fake ResourceA/ResourceB plugins.

    setUp builds two flavors (id 1 without extra_specs, id 2 requesting
    resA=5, resB=7, resC=7) and a FakeResourceHandler with the resA and
    resB extensions loaded.
    """

    def _initialize_used_res_counter(self):
        # Initialize the value for the used resource
        for ext in self.r_handler._mgr.extensions:
            ext.obj.used_res = 0

    def setUp(self):
        super(BaseTestCase, self).setUp()
        # initialize flavors and stub get_by_id to
        # get flavors from here
        self._flavors = {}
        self.ctxt = context.get_admin_context()
        # Create a flavor without extra_specs defined
        _flavor_id = 1
        _flavor = fake_flavor_obj(id=_flavor_id)
        self._flavors[_flavor_id] = _flavor
        # Create a flavor with extra_specs defined
        _flavor_id = 2
        requested_resA = 5
        requested_resB = 7
        requested_resC = 7
        _extra_specs = {'resource:resA': requested_resA,
                        'resource:resB': requested_resB,
                        'resource:resC': requested_resC}
        _flavor = fake_flavor_obj(id=_flavor_id,
                                  extra_specs=_extra_specs)
        self._flavors[_flavor_id] = _flavor
        # create fake resource extensions and resource handler
        _extensions = [
            extension.Extension('resA', None, ResourceA, ResourceA()),
            extension.Extension('resB', None, ResourceB, ResourceB()),
        ]
        self.r_handler = FakeResourceHandler(_extensions)
        # Resources details can be passed to each plugin or can be specified as
        # configuration options
        driver_resources = {'resB': 5}
        CONF.resA = '10'
        # initialise the resources
        self.r_handler.reset_resources(driver_resources, None)

    def test_update_from_instance_with_extra_specs(self):
        # Adding a flavor that requests resA/resB must raise each plugin's
        # used_res by the requested amount.
        # Flavor with extra_specs
        _flavor_id = 2
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
        expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
        expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
        self.assertEqual(int(expected_resA),
                         self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(int(expected_resB),
                         self.r_handler._mgr['resB'].obj.used_res)

    def test_update_from_instance_without_extra_specs(self):
        # A flavor with no extra_specs must leave plugin usage untouched.
        # Flavor id without extra spec
        _flavor_id = 1
        self._initialize_used_res_counter()
        self.r_handler.resource_list = []
        sign = 1
        self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
        self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
        self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)

    def test_write_resources(self):
        # Each plugin writes its total and used values into the dict.
        self._initialize_used_res_counter()
        extra_resources = {}
        expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
        self.r_handler.write_resources(extra_resources)
        self.assertEqual(expected, extra_resources)

    def test_test_resources_without_extra_specs(self):
        # No requests at all: every plugin passes (returns None).
        limits = {}
        # Flavor id without extra_specs
        flavor = self._flavors[1]
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_test_resources_with_limits_for_different_resource(self):
        # Limits only for resC, which no loaded plugin tracks: both pass.
        limits = {'resource:resC': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_passing_test_resources(self):
        # Requests (5, 7) fit within limits (10, 20): both plugins pass.
        limits = {'resource:resA': 10, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        self.assertEqual([None, None], result)

    def test_failing_test_resources_for_single_resource(self):
        # resA requests 5 but the limit is 4, so that plugin reports a
        # failure string while resB still passes.
        limits = {'resource:resA': 4, 'resource:resB': 20}
        # Flavor id with extra_specs
        flavor = self._flavors[2]
        self._initialize_used_res_counter()
        result = self.r_handler.test_resources(flavor, limits)
        expected = ['Free 4 < requested 5 ', None]
        # sort with key=str because the lists mix None and str
        self.assertEqual(sorted(expected, key=str),
                         sorted(result, key=str))

    def test_empty_resource_handler(self):
        """An empty resource handler has no resource extensions,
        should have no effect, and should raise no exceptions.
        """
        empty_r_handler = FakeResourceHandler([])
        resources = {}
        empty_r_handler.reset_resources(resources, None)
        flavor = self._flavors[1]
        sign = 1
        empty_r_handler.update_from_instance(flavor, sign)
        limits = {}
        test_result = empty_r_handler.test_resources(flavor, limits)
        self.assertEqual([], test_result)
        sign = -1
        empty_r_handler.update_from_instance(flavor, sign)
        extra_resources = {}
        expected_extra_resources = extra_resources
        empty_r_handler.write_resources(extra_resources)
        self.assertEqual(expected_extra_resources, extra_resources)
        empty_r_handler.report_free_resources()

View File

@ -1035,6 +1035,7 @@ class TestInitComputeNode(BaseTestCase):
ram_allocation_ratio=ram_alloc_ratio,
cpu_allocation_ratio=cpu_alloc_ratio,
disk_allocation_ratio=disk_alloc_ratio,
stats={},
)
# Forcing the flags to the values we know
@ -1227,6 +1228,14 @@ class TestInstanceClaim(BaseTestCase):
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
@ -1253,6 +1262,14 @@ class TestInstanceClaim(BaseTestCase):
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
@ -1263,7 +1280,18 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_node))
expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_updated.pci_device_pools = objects.PciDevicePoolList()
vals = {
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 0,
'num_task_None': 0,
'num_os_type_' + self.instance.os_type: 0,
'num_proj_' + self.instance.project_id: 0,
'num_vm_' + self.instance.vm_state: 0,
},
}
_update_compute_node(expected_updated, **vals)
self.instance.vm_state = vm_states.SHELVED_OFFLOADED
with mock.patch.object(self.rt, '_update') as update_mock:
@ -1288,6 +1316,14 @@ class TestInstanceClaim(BaseTestCase):
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
@ -1329,7 +1365,15 @@ class TestInstanceClaim(BaseTestCase):
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': pci_pools
'pci_device_pools': pci_pools,
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:

View File

@ -0,0 +1,6 @@
---
upgrade:
- The extensible resource tracker was deprecated in the 13.0.0 release and
has now been removed. Custom resources in the nova.compute.resources
namespace selected by the compute_resources configuration parameter will
not be loaded.