Helper to start computes with different HostInfos
Sometimes functional tests need to start multiple compute hosts with
different HostInfo objects (representing, for example, different NUMA
topologies). In the future this will also be needed by both NUMA-aware
live migration functional tests and a regression test for bug 1843639.
This patch adds a helper function to the base libvirt functional test
class that takes a hostname-to-HostInfo dict and starts a compute for
each host. Existing tests that can make use of this new helper are
refactored.
Change-Id: Id3f77c4ecccfdc4caa6dbf120c3df4fbdfce9d0f
(cherry picked from commit 607307c1d8)
This commit is contained in:
parent
0668bc1a86
commit
29ee5984f8
@ -115,6 +115,59 @@ class ServersTestBase(base.ServersTestBase):
|
|||||||
hostname=hostname)
|
hostname=hostname)
|
||||||
return fake_connection
|
return fake_connection
|
||||||
|
|
||||||
|
def start_computes(self, host_info_dict=None, save_rp_uuids=False):
    """Start one compute service per entry in ``host_info_dict``.

    Started services are stored in ``self.computes``, keyed by hostname.

    :param host_info_dict: Mapping of hostname -> fakelibvirt.HostInfo
                           describing the libvirt host presented by each
                           compute. When None, two computes named
                           test_compute0 and test_compute1 are started,
                           each with 2 NUMA nodes, 2 cores per node,
                           2 threads per core, and 16GB of RAM.
    :param save_rp_uuids: When True, also record each compute's resource
                          provider UUID in ``self.compute_rp_uuids``,
                          keyed by hostname.
    """
    if host_info_dict is None:
        default_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
                                            cpu_cores=2, cpu_threads=2,
                                            kB_mem=15740000)
        host_info_dict = {'test_compute0': default_info,
                          'test_compute1': default_info}

    def _boot_one(hostname, info):
        # Build the fake libvirt connection for this host up front.
        conn = self._get_connection(host_info=info, hostname=hostname)
        # This is fun. Firstly we need to do a global'ish mock so we can
        # actually start the service.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=conn):
            svc = self.start_service('compute', host=hostname)
            # Once that's done, we need to tweak the compute "service" to
            # make sure it returns unique objects. We do this inside the
            # mock context to avoid a small window between the end of the
            # context and the tweaking where get_connection would revert to
            # being an autospec mock.
            svc.driver._host.get_connection = lambda: conn
        return svc

    self.computes = {}
    self.compute_rp_uuids = {}
    for hostname, info in host_info_dict.items():
        # NOTE(artom) A lambda: foo construct returns the value of foo at
        # call-time, so if the value of foo changes with every iteration of
        # a loop, every call to the lambda will return a different value of
        # foo. Because that's not what we want in our lambda further up,
        # we can't put it directly in the for loop, and need the _boot_one
        # function to create a scope in which hostname and info do not
        # change with every iteration of the for loop.
        self.computes[hostname] = _boot_one(hostname, info)
        if save_rp_uuids:
            self.compute_rp_uuids[hostname] = self.placement_api.get(
                '/resource_providers?name=%s' % hostname).body[
                'resource_providers'][0]['uuid']
|
||||||
|
|
||||||
|
|
||||||
class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
|
class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
|
||||||
"""A custom variant of the stock neutron fixture with more networks.
|
"""A custom variant of the stock neutron fixture with more networks.
|
||||||
|
@ -548,32 +548,8 @@ class NUMAServersTest(NUMAServersTestBase):
|
|||||||
group='compute')
|
group='compute')
|
||||||
self.flags(vcpu_pin_set=None)
|
self.flags(vcpu_pin_set=None)
|
||||||
|
|
||||||
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
|
|
||||||
cpu_cores=2, cpu_threads=2,
|
|
||||||
kB_mem=15740000)
|
|
||||||
|
|
||||||
# Start services
|
# Start services
|
||||||
self.computes = {}
|
self.start_computes(save_rp_uuids=True)
|
||||||
self.compute_rp_uuids = {}
|
|
||||||
for host in ['test_compute0', 'test_compute1']:
|
|
||||||
fake_connection = self._get_connection(
|
|
||||||
host_info=host_info, hostname=host)
|
|
||||||
|
|
||||||
# This is fun. Firstly we need to do a global'ish mock so we can
|
|
||||||
# actually start the service.
|
|
||||||
with mock.patch('nova.virt.libvirt.host.Host.get_connection',
|
|
||||||
return_value=fake_connection):
|
|
||||||
compute = self.start_service('compute', host=host)
|
|
||||||
|
|
||||||
# Once that's done, we need to do some tweaks to each individual
|
|
||||||
# compute "service" to make sure they return unique objects
|
|
||||||
compute.driver._host.get_connection = lambda: fake_connection
|
|
||||||
self.computes[host] = compute
|
|
||||||
|
|
||||||
# and save the UUIDs for the corresponding resource providers
|
|
||||||
self.compute_rp_uuids[host] = self.placement_api.get(
|
|
||||||
'/resource_providers?name=%s' % host).body[
|
|
||||||
'resource_providers'][0]['uuid']
|
|
||||||
|
|
||||||
# Create server
|
# Create server
|
||||||
flavor_a_id = self._create_flavor(extra_spec={})
|
flavor_a_id = self._create_flavor(extra_spec={})
|
||||||
@ -718,27 +694,7 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
|
|||||||
kB_mem=15740000)
|
kB_mem=15740000)
|
||||||
|
|
||||||
# Start services
|
# Start services
|
||||||
self.computes = {}
|
self.start_computes(save_rp_uuids=True)
|
||||||
self.compute_rp_uuids = {}
|
|
||||||
for host in ['test_compute0', 'test_compute1']:
|
|
||||||
fake_connection = self._get_connection(
|
|
||||||
host_info=host_info, hostname=host)
|
|
||||||
|
|
||||||
# This is fun. Firstly we need to do a global'ish mock so we can
|
|
||||||
# actually start the service.
|
|
||||||
with mock.patch('nova.virt.libvirt.host.Host.get_connection',
|
|
||||||
return_value=fake_connection):
|
|
||||||
compute = self.start_service('compute', host=host)
|
|
||||||
|
|
||||||
# Once that's done, we need to do some tweaks to each individual
|
|
||||||
# compute "service" to make sure they return unique objects
|
|
||||||
compute.driver._host.get_connection = lambda: fake_connection
|
|
||||||
self.computes[host] = compute
|
|
||||||
|
|
||||||
# and save the UUIDs for the corresponding resource providers
|
|
||||||
self.compute_rp_uuids[host] = self.placement_api.get(
|
|
||||||
'/resource_providers?name=%s' % host).body[
|
|
||||||
'resource_providers'][0]['uuid']
|
|
||||||
|
|
||||||
# ensure there is no PCPU inventory being reported
|
# ensure there is no PCPU inventory being reported
|
||||||
|
|
||||||
@ -1135,26 +1091,9 @@ class NUMAServersWithNetworksTest(NUMAServersTestBase):
|
|||||||
self.assertIn('NoValidHost', six.text_type(ex))
|
self.assertIn('NoValidHost', six.text_type(ex))
|
||||||
|
|
||||||
def test_cold_migrate_with_physnet(self):
|
def test_cold_migrate_with_physnet(self):
|
||||||
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
|
|
||||||
cpu_cores=2, cpu_threads=2,
|
|
||||||
kB_mem=15740000)
|
|
||||||
|
|
||||||
# Start services
|
# Start services
|
||||||
self.computes = {}
|
self.start_computes(save_rp_uuids=True)
|
||||||
for host in ['test_compute0', 'test_compute1']:
|
|
||||||
fake_connection = self._get_connection(
|
|
||||||
host_info=host_info, hostname=host)
|
|
||||||
|
|
||||||
# This is fun. Firstly we need to do a global'ish mock so we can
|
|
||||||
# actually start the service.
|
|
||||||
with mock.patch('nova.virt.libvirt.host.Host.get_connection',
|
|
||||||
return_value=fake_connection):
|
|
||||||
compute = self.start_service('compute', host=host)
|
|
||||||
|
|
||||||
# Once that's done, we need to do some tweaks to each individual
|
|
||||||
# compute "service" to make sure they return unique objects
|
|
||||||
compute.driver._host.get_connection = lambda: fake_connection
|
|
||||||
self.computes[host] = compute
|
|
||||||
|
|
||||||
# Create server
|
# Create server
|
||||||
extra_spec = {'hw:numa_nodes': '1'}
|
extra_spec = {'hw:numa_nodes': '1'}
|
||||||
|
Loading…
Reference in New Issue
Block a user