libvirt: report VPMEM resources by provider tree
Report VPMEM resources by updating the provider tree. Custom resource
classes are used here to represent PMEM namespace resources. Currently,
VPMEM inventories and resources belong directly to the compute node
resource provider.

Change-Id: Iaa63b9078950835f59345600754c5df580bdfc54
Partially-Implements: blueprint virtual-persistent-memory
Co-Authored-By: He Jie Xu <hejie.xu@intel.com>
parent 3978b99d8f
commit d7c07584f1
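The custom resource class names used below (e.g. CUSTOM_PMEM_NAMESPACE_4GB)
are derived from the operator-configured namespace labels; placement requires
custom classes to match CUSTOM_[A-Z0-9_]+. A minimal sketch of that naming
rule, using a hypothetical vpmem_resource_class helper (the driver itself may
normalize labels differently):

    import re

    def vpmem_resource_class(label):
        # Placement custom resource classes must match CUSTOM_[A-Z0-9_]+,
        # so uppercase the label and replace anything else with '_'.
        normalized = re.sub(r'[^A-Z0-9_]', '_', label.upper())
        return 'CUSTOM_PMEM_NAMESPACE_%s' % normalized

    assert vpmem_resource_class('4GB') == 'CUSTOM_PMEM_NAMESPACE_4GB'
    assert vpmem_resource_class('SMALL') == 'CUSTOM_PMEM_NAMESPACE_SMALL'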
@@ -19246,10 +19246,12 @@ class TestUpdateProviderTree(test.NoDBTestCase):
     @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_vcpu_total',
                 return_value=vcpus)
     def _test_update_provider_tree(self, mock_vcpu, mock_mem, mock_disk,
-                                   mock_gpu_invs, gpu_invs=None):
+                                   mock_gpu_invs, gpu_invs=None, vpmems=None):
         if gpu_invs:
             self.flags(enabled_vgpu_types=['nvidia-11'], group='devices')
             mock_gpu_invs.return_value = gpu_invs
+        if vpmems:
+            self.driver._vpmems_by_rc = vpmems
         self.driver.update_provider_tree(self.pt,
                                          self.cn_rp['name'])
 
@@ -19313,6 +19315,59 @@ class TestUpdateProviderTree(test.NoDBTestCase):
                     'max_unit'] = inventory_dict['max_unit']
             self.assertEqual(pgpu_inventory, pgpu_provider_data.inventory)
 
+    def test_update_provider_tree_for_vpmem(self):
+        rp_uuid = self.cn_rp['uuid']
+        vpmem_0 = objects.LibvirtVPMEMDevice(label='4GB', name='ns_0',
+            size=4292870144, devpath='/dev/dax0.0', align=2097152)
+        vpmem_1 = objects.LibvirtVPMEMDevice(label='SMALL', name='ns_1',
+            size=4292870144, devpath='/dev/dax0.1', align=2097152)
+        vpmem_2 = objects.LibvirtVPMEMDevice(label='4GB', name='ns_2',
+            size=4292870144, devpath='/dev/dax0.2', align=2097152)
+        vpmems_by_rc = {
+            'CUSTOM_PMEM_NAMESPACE_4GB': [vpmem_0],
+            'CUSTOM_PMEM_NAMESPACE_SMALL': [vpmem_1, vpmem_2]
+        }
+
+        self._test_update_provider_tree(vpmems=vpmems_by_rc)
+        expected_inventory = self._get_inventory()
+        expected_resources = {}
+        expected_inventory["CUSTOM_PMEM_NAMESPACE_4GB"] = {
+            'total': 1,
+            'max_unit': 1,
+            'min_unit': 1,
+            'step_size': 1,
+            'allocation_ratio': 1.0,
+            'reserved': 0
+        }
+        expected_inventory["CUSTOM_PMEM_NAMESPACE_SMALL"] = {
+            'total': 2,
+            'max_unit': 2,
+            'min_unit': 1,
+            'step_size': 1,
+            'allocation_ratio': 1.0,
+            'reserved': 0
+        }
+        expected_resources["CUSTOM_PMEM_NAMESPACE_4GB"] = {
+            objects.Resource(
+                provider_uuid=rp_uuid,
+                resource_class="CUSTOM_PMEM_NAMESPACE_4GB",
+                identifier='ns_0', metadata=vpmem_0)
+        }
+        expected_resources["CUSTOM_PMEM_NAMESPACE_SMALL"] = {
+            objects.Resource(
+                provider_uuid=rp_uuid,
+                resource_class="CUSTOM_PMEM_NAMESPACE_SMALL",
+                identifier='ns_1', metadata=vpmem_1),
+            objects.Resource(
+                provider_uuid=rp_uuid,
+                resource_class="CUSTOM_PMEM_NAMESPACE_SMALL",
+                identifier='ns_2', metadata=vpmem_2)
+        }
+        self.assertEqual(expected_inventory,
+                         self.pt.data(self.cn_rp['uuid']).inventory)
+        self.assertEqual(expected_resources,
+                         self.pt.data(self.cn_rp['uuid']).resources)
+
     @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_local_gb_info',
                 return_value={'total': disk_gb})
     @mock.patch('nova.virt.libvirt.host.Host.get_memory_mb_total',
@@ -7038,6 +7038,7 @@ class LibvirtDriver(driver.ComputeDriver):
         # otherwise.
         inv = provider_tree.data(nodename).inventory
         ratios = self._get_allocation_ratios(inv)
+        resources = collections.defaultdict(set)
         result = {
             orc.VCPU: {
                 'total': vcpus,
@@ -7093,7 +7094,11 @@ class LibvirtDriver(driver.ComputeDriver):
         self._update_provider_tree_for_vgpu(
             provider_tree, nodename, allocations=allocations)
 
+        self._update_provider_tree_for_vpmems(
+            provider_tree, nodename, result, resources)
+
         provider_tree.update_inventory(nodename, result)
+        provider_tree.update_resources(nodename, resources)
 
         traits = self._get_cpu_traits()
         # _get_cpu_traits returns a dict of trait names mapped to boolean
@@ -7108,6 +7113,27 @@ class LibvirtDriver(driver.ComputeDriver):
         # so that spawn() or other methods can access it thru a getter
         self.provider_tree = copy.deepcopy(provider_tree)
 
+    def _update_provider_tree_for_vpmems(self, provider_tree, nodename,
+                                         inventory, resources):
+        """Update resources and inventory for vpmems in provider tree."""
+        prov_data = provider_tree.data(nodename)
+        for rc, vpmems in self._vpmems_by_rc.items():
+            inventory[rc] = {
+                'total': len(vpmems),
+                'max_unit': len(vpmems),
+                'min_unit': 1,
+                'step_size': 1,
+                'allocation_ratio': 1.0,
+                'reserved': 0
+            }
+            for vpmem in vpmems:
+                resource_obj = objects.Resource(
+                    provider_uuid=prov_data.uuid,
+                    resource_class=rc,
+                    identifier=vpmem.name,
+                    metadata=vpmem)
+                resources[rc].add(resource_obj)
+
     def _get_memory_encrypted_slots(self):
         slots = CONF.libvirt.num_memory_encrypted_guests
         if not self._host.supports_amd_sev:
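On the resources side, each namespace becomes one record in a set keyed by
resource class, which is why the caller seeds resources with
collections.defaultdict(set). A standalone sketch of that bookkeeping, with
a hashable NamedTuple standing in for nova's objects.Resource (the real
object also carries the device itself as metadata):

    import collections
    from typing import NamedTuple

    class FakeResource(NamedTuple):
        provider_uuid: str
        resource_class: str
        identifier: str  # objects.Resource additionally holds metadata

    resources = collections.defaultdict(set)
    vpmems_by_rc = {'CUSTOM_PMEM_NAMESPACE_SMALL': ['ns_1', 'ns_2']}
    for rc, names in vpmems_by_rc.items():
        for name in names:
            # Hashability is what makes set membership work here, as it
            # does for objects.Resource in the driver code above.
            resources[rc].add(FakeResource('rp-uuid', rc, name))

    assert len(resources['CUSTOM_PMEM_NAMESPACE_SMALL']) == 2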