trivial: omit condition evaluations

This patch refactors the libvirt driver with the following strategies:

1. We can omit the else branch to save an indentation level when the if
   branch ends in a return, as in the following function (a concrete
   sketch follows the block):

------
def function():
    if A:
        return B
    else:
        ... long code ...
        return C
------
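
A concrete sketch of strategy 1 (the function and names below are
hypothetical, for illustration only, and do not appear in this patch):

------
# Hypothetical example; the early return makes the else unnecessary.
def describe(items):
    if not items:
        return "empty"
    # The long path stays one indentation level shallower than it
    # would be inside an else branch.
    names = sorted(item.lower() for item in items)
    return ", ".join(names)

print(describe([]))          # -> empty
print(describe(["B", "a"]))  # -> a, b
------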

2. Of the following two equivalent code blocks, we can take the latter:
   returning early on the negated condition keeps the long body at the
   outer indentation level (a concrete sketch follows the second block):

------
if A:
    ... long code ...
    return B
else:
    return C
------
------
if not A:
    return C

... long code ...
return B
------
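
A self-contained sketch of strategy 2 (again with hypothetical names,
not code from this patch): inverting the condition turns the short
branch into a guard clause, which is exactly what the "if not topology"
early return in the diff below does.

------
# Hypothetical example; the guard clause replaces an else branch.
def tuned_cells(topology, allowed):
    if not topology:
        return []
    # The long branch now sits at the outer indentation level.
    cells = [cell for cell in topology if cell in allowed]
    cells.sort()
    return cells

print(tuned_cells(None, {1, 2}))       # -> []
print(tuned_cells([3, 1, 2], {1, 3}))  # -> [1, 3]
------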

Change-Id: I06f2078a8c98d35841bcdd17625e8b6e07e8c95c
Author: Tetsuro Nakamura
Date: 2018-02-16 18:14:49 +09:00
Parent: 1ecd2d9f77
Commit: c67c74db45
1 changed file with 71 additions and 81 deletions


@@ -4414,8 +4414,13 @@ class LibvirtDriver(driver.ComputeDriver):
             # across NUMA nodes and expose the topology to the
             # instance as an optimisation
             return GuestNumaConfig(allowed_cpus, None, None, None)
-        else:
-            if topology:
+
+        if not topology:
+            # No NUMA topology defined for host - This will only happen with
+            # some libvirt versions and certain platforms.
+            return GuestNumaConfig(allowed_cpus, None,
+                                   guest_cpu_numa_config, None)
+
         # Now get configuration from the numa_topology
         # Init CPUTune configuration
         guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
@@ -4425,8 +4430,7 @@ class LibvirtDriver(driver.ComputeDriver):
 
         # Init NUMATune configuration
         guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
-        guest_numa_tune.memory = (
-            vconfig.LibvirtConfigGuestNUMATuneMemory())
+        guest_numa_tune.memory = vconfig.LibvirtConfigGuestNUMATuneMemory()
         guest_numa_tune.memnodes = []
 
         emulator_threads_isolated = (
@@ -4436,43 +4440,34 @@ class LibvirtDriver(driver.ComputeDriver):
         vcpus_rt = set([])
         wants_realtime = hardware.is_realtime_enabled(flavor)
         if wants_realtime:
-            if not self._host.has_min_version(
-                    MIN_LIBVIRT_REALTIME_VERSION):
+            if not self._host.has_min_version(MIN_LIBVIRT_REALTIME_VERSION):
                 raise exception.RealtimePolicyNotSupported()
-            vcpus_rt = hardware.vcpus_realtime_topology(
-                flavor, image_meta)
+            vcpus_rt = hardware.vcpus_realtime_topology(flavor, image_meta)
             vcpusched = vconfig.LibvirtConfigGuestCPUTuneVCPUSched()
             designer.set_vcpu_realtime_scheduler(
-                vcpusched, vcpus_rt,
-                CONF.libvirt.realtime_scheduler_priority)
+                vcpusched, vcpus_rt, CONF.libvirt.realtime_scheduler_priority)
             guest_cpu_tune.vcpusched.append(vcpusched)
 
-        cell_pairs = self._get_cell_pairs(guest_cpu_numa_config,
-                                          topology)
+        cell_pairs = self._get_cell_pairs(guest_cpu_numa_config, topology)
         for guest_node_id, (guest_config_cell, host_cell) in enumerate(
                 cell_pairs):
             # set NUMATune for the cell
             tnode = vconfig.LibvirtConfigGuestNUMATuneMemNode()
-            designer.set_numa_memnode(
-                tnode, guest_node_id, host_cell.id)
+            designer.set_numa_memnode(tnode, guest_node_id, host_cell.id)
             guest_numa_tune.memnodes.append(tnode)
             guest_numa_tune.memory.nodeset.append(host_cell.id)
 
             # set CPUTune for the cell
-            object_numa_cell = instance_numa_topology.cells[
-                guest_node_id]
+            object_numa_cell = instance_numa_topology.cells[guest_node_id]
             for cpu in guest_config_cell.cpus:
-                pin_cpuset = self._get_pin_cpuset(
-                    cpu, object_numa_cell, host_cell)
+                pin_cpuset = self._get_pin_cpuset(cpu, object_numa_cell,
+                                                  host_cell)
                 guest_cpu_tune.vcpupin.append(pin_cpuset)
 
                 emu_pin_cpuset = self._get_emulatorpin_cpuset(
                     cpu, object_numa_cell, vcpus_rt,
-                    emulator_threads_isolated,
-                    wants_realtime, pin_cpuset)
-                guest_cpu_tune.emulatorpin.cpuset.update(
-                    emu_pin_cpuset)
+                    emulator_threads_isolated, wants_realtime, pin_cpuset)
+                guest_cpu_tune.emulatorpin.cpuset.update(emu_pin_cpuset)
 
         # TODO(berrange) When the guest has >1 NUMA node, it will
         # span multiple host NUMA nodes. By pinning emulator threads
@@ -4495,18 +4490,13 @@ class LibvirtDriver(driver.ComputeDriver):
         guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
 
         # normalize cell.id
-        for i, (cell, memnode) in enumerate(
-                zip(guest_cpu_numa_config.cells,
-                    guest_numa_tune.memnodes)):
+        for i, (cell, memnode) in enumerate(zip(guest_cpu_numa_config.cells,
+                                                guest_numa_tune.memnodes)):
             cell.id = i
             memnode.cellid = i
 
-        return GuestNumaConfig(None, guest_cpu_tune,
-                               guest_cpu_numa_config,
-                               guest_numa_tune)
-        else:
-            return GuestNumaConfig(allowed_cpus, None,
-                                   guest_cpu_numa_config, None)
+        return GuestNumaConfig(None, guest_cpu_tune, guest_cpu_numa_config,
+                               guest_numa_tune)
 
     def _get_guest_os_type(self, virt_type):
         """Returns the guest OS type based on virt type."""