trivial: omit condition evaluations

This patch refactors the libvirt driver using the following strategies:

1. We can omit the else to save an indentation level in a function shaped like the following:

------
function():
  if A:
    return B
  else:
    ...long codes...
    return C
------

2. Of the two equivalent code blocks below, we can prefer the latter, early-return form (a short runnable sketch of both strategies follows these examples).

------
if A:
  ...long codes...
  return B

else:
  return C
------
------
if not A:
  return C

...long codes...
return B
------
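
Applied together, the two strategies replace nested if/else branches with
early returns. The sketch below is illustrative only - a toy function, not
code from this patch - but it shows both moves and that behaviour is
unchanged:

------
def describe_speed_nested(speed):
    """Original shape: results are returned from inside if/else blocks."""
    if speed is None:
        return "unknown"
    else:
        if speed > 100:
            label = "fast"
            # ...imagine many more lines here...
            return label
        else:
            return "slow"


def describe_speed_flat(speed):
    """Refactored shape: early returns, no else, one less indent level."""
    if speed is None:       # strategy 1: return early and drop the else
        return "unknown"

    if speed <= 100:        # strategy 2: invert the condition and return
        return "slow"

    label = "fast"
    # ...the long branch now sits at the top indentation level...
    return label


# Both forms behave identically.
assert describe_speed_nested(150) == describe_speed_flat(150) == "fast"
assert describe_speed_nested(50) == describe_speed_flat(50) == "slow"
assert describe_speed_nested(None) == describe_speed_flat(None) == "unknown"
------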

Change-Id: I06f2078a8c98d35841bcdd17625e8b6e07e8c95c
Tetsuro Nakamura 2018-02-16 18:14:49 +09:00
parent 1ecd2d9f77
commit c67c74db45
1 changed file with 71 additions and 81 deletions


@@ -4414,99 +4414,89 @@ class LibvirtDriver(driver.ComputeDriver):
             # across NUMA nodes and expose the topology to the
             # instance as an optimisation
             return GuestNumaConfig(allowed_cpus, None, None, None)
-        else:
-            if topology:
-                # Now get configuration from the numa_topology
-                # Init CPUTune configuration
-                guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
-                guest_cpu_tune.emulatorpin = (
-                    vconfig.LibvirtConfigGuestCPUTuneEmulatorPin())
-                guest_cpu_tune.emulatorpin.cpuset = set([])
+
+        if not topology:
+            # No NUMA topology defined for host - This will only happen with
+            # some libvirt versions and certain platforms.
+            return GuestNumaConfig(allowed_cpus, None,
+                                   guest_cpu_numa_config, None)
+
+        # Now get configuration from the numa_topology
+        # Init CPUTune configuration
+        guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
+        guest_cpu_tune.emulatorpin = (
+            vconfig.LibvirtConfigGuestCPUTuneEmulatorPin())
+        guest_cpu_tune.emulatorpin.cpuset = set([])
 
-                # Init NUMATune configuration
-                guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
-                guest_numa_tune.memory = (
-                    vconfig.LibvirtConfigGuestNUMATuneMemory())
-                guest_numa_tune.memnodes = []
+        # Init NUMATune configuration
+        guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
+        guest_numa_tune.memory = vconfig.LibvirtConfigGuestNUMATuneMemory()
+        guest_numa_tune.memnodes = []
 
-                emulator_threads_isolated = (
-                    instance_numa_topology.emulator_threads_isolated)
+        emulator_threads_isolated = (
+            instance_numa_topology.emulator_threads_isolated)
 
-                # Set realtime scheduler for CPUTune
-                vcpus_rt = set([])
-                wants_realtime = hardware.is_realtime_enabled(flavor)
-                if wants_realtime:
-                    if not self._host.has_min_version(
-                            MIN_LIBVIRT_REALTIME_VERSION):
-                        raise exception.RealtimePolicyNotSupported()
-                    vcpus_rt = hardware.vcpus_realtime_topology(
-                        flavor, image_meta)
-                    vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
-                    designer.set_vcpu_realtime_scheduler(
-                        vcpusched, vcpus_rt,
-                        CONF.libvirt.realtime_scheduler_priority)
-                    guest_cpu_tune.vcpusched.append(vcpusched)
+        # Set realtime scheduler for CPUTune
+        vcpus_rt = set([])
+        wants_realtime = hardware.is_realtime_enabled(flavor)
+        if wants_realtime:
+            if not self._host.has_min_version(MIN_LIBVIRT_REALTIME_VERSION):
+                raise exception.RealtimePolicyNotSupported()
+            vcpus_rt = hardware.vcpus_realtime_topology(flavor, image_meta)
+            vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
+            designer.set_vcpu_realtime_scheduler(
+                vcpusched, vcpus_rt, CONF.libvirt.realtime_scheduler_priority)
+            guest_cpu_tune.vcpusched.append(vcpusched)
 
-                cell_pairs = self._get_cell_pairs(guest_cpu_numa_config,
-                                                  topology)
-                for guest_node_id, (guest_config_cell, host_cell) in enumerate(
-                        cell_pairs):
-                    # set NUMATune for the cell
-                    tnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
-                    designer.set_numa_memnode(
-                        tnode, guest_node_id, host_cell.id)
-                    guest_numa_tune.memnodes.append(tnode)
-                    guest_numa_tune.memory.nodeset.append(host_cell.id)
+        cell_pairs = self._get_cell_pairs(guest_cpu_numa_config, topology)
+        for guest_node_id, (guest_config_cell, host_cell) in enumerate(
+                cell_pairs):
+            # set NUMATune for the cell
+            tnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
+            designer.set_numa_memnode(tnode, guest_node_id, host_cell.id)
+            guest_numa_tune.memnodes.append(tnode)
+            guest_numa_tune.memory.nodeset.append(host_cell.id)
 
-                    # set CPUTune for the cell
-                    object_numa_cell = instance_numa_topology.cells[
-                        guest_node_id]
-                    for cpu in guest_config_cell.cpus:
-                        pin_cpuset = self._get_pin_cpuset(
-                            cpu, object_numa_cell, host_cell)
-                        guest_cpu_tune.vcpupin.append(pin_cpuset)
+            # set CPUTune for the cell
+            object_numa_cell = instance_numa_topology.cells[guest_node_id]
+            for cpu in guest_config_cell.cpus:
+                pin_cpuset = self._get_pin_cpuset(cpu, object_numa_cell,
+                                                  host_cell)
+                guest_cpu_tune.vcpupin.append(pin_cpuset)
 
-                        emu_pin_cpuset = self._get_emulatorpin_cpuset(
-                            cpu, object_numa_cell, vcpus_rt,
-                            emulator_threads_isolated,
-                            wants_realtime, pin_cpuset)
-                        guest_cpu_tune.emulatorpin.cpuset.update(
-                            emu_pin_cpuset)
+                emu_pin_cpuset = self._get_emulatorpin_cpuset(
+                    cpu, object_numa_cell, vcpus_rt,
+                    emulator_threads_isolated, wants_realtime, pin_cpuset)
+                guest_cpu_tune.emulatorpin.cpuset.update(emu_pin_cpuset)
 
-                # TODO(berrange) When the guest has >1 NUMA node, it will
-                # span multiple host NUMA nodes. By pinning emulator threads
-                # to the union of all nodes, we guarantee there will be
-                # cross-node memory access by the emulator threads when
-                # responding to guest I/O operations. The only way to avoid
-                # this would be to pin emulator threads to a single node and
-                # tell the guest OS to only do I/O from one of its virtual
-                # NUMA nodes. This is not even remotely practical.
-                #
-                # The long term solution is to make use of a new QEMU feature
-                # called "I/O Threads" which will let us configure an explicit
-                # I/O thread for each guest vCPU or guest NUMA node. It is
-                # still TBD how to make use of this feature though, especially
-                # how to associate IO threads with guest devices to eliminate
-                # cross NUMA node traffic. This is an area of investigation
-                # for QEMU community devs.
+        # TODO(berrange) When the guest has >1 NUMA node, it will
+        # span multiple host NUMA nodes. By pinning emulator threads
+        # to the union of all nodes, we guarantee there will be
+        # cross-node memory access by the emulator threads when
+        # responding to guest I/O operations. The only way to avoid
+        # this would be to pin emulator threads to a single node and
+        # tell the guest OS to only do I/O from one of its virtual
+        # NUMA nodes. This is not even remotely practical.
+        #
+        # The long term solution is to make use of a new QEMU feature
+        # called "I/O Threads" which will let us configure an explicit
+        # I/O thread for each guest vCPU or guest NUMA node. It is
+        # still TBD how to make use of this feature though, especially
+        # how to associate IO threads with guest devices to eliminate
+        # cross NUMA node traffic. This is an area of investigation
+        # for QEMU community devs.
 
-                # Sort the vcpupin list per vCPU id for human-friendlier XML
-                guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
+        # Sort the vcpupin list per vCPU id for human-friendlier XML
+        guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
 
-                # normalize cell.id
-                for i, (cell, memnode) in enumerate(
-                        zip(guest_cpu_numa_config.cells,
-                            guest_numa_tune.memnodes)):
-                    cell.id = i
-                    memnode.cellid = i
+        # normalize cell.id
+        for i, (cell, memnode) in enumerate(zip(guest_cpu_numa_config.cells,
+                                                guest_numa_tune.memnodes)):
+            cell.id = i
+            memnode.cellid = i
 
-                return GuestNumaConfig(None, guest_cpu_tune,
-                                       guest_cpu_numa_config,
-                                       guest_numa_tune)
-            else:
-                return GuestNumaConfig(allowed_cpus, None,
-                                       guest_cpu_numa_config, None)
+        return GuestNumaConfig(None, guest_cpu_tune, guest_cpu_numa_config,
+                               guest_numa_tune)
 
     def _get_guest_os_type(self, virt_type):
         """Returns the guest OS type based on virt type."""