Backslash continuations (network, scheduler)
Backslash continuations removal for packages nova.network
and nova.scheduler.

Fixes bug #925281
Change-Id: I84f886d6bc179ecf5d09e57d278bfa644d5d851d
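The pattern removed throughout this diff is the explicit backslash line
continuation, which PEP 8 discourages: a stray space after the backslash is a
syntax error, and the backslash is easy to lose when editing. Wrapping the
expression in parentheses gives implicit continuation instead. A minimal
before/after sketch of the transformation (the names and values below are
illustrative, not taken from the patch):

    caps, lower, upper = 8, 1, 16   # illustrative values, not from the patch

    # Before: explicit backslash continuation. Trailing whitespace after
    # the backslash breaks the parse, and the backslash is easy to drop.
    ok = caps >= lower and \
         caps <= upper

    # After: implicit continuation inside parentheses, the form this
    # patch applies across nova.network and nova.scheduler.
    ok = (caps >= lower and
          caps <= upper)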
@@ -33,10 +33,9 @@ from nova import utils
 from eventlet import greenpool
 
 
-enable_zone_routing_opt = \
-    cfg.BoolOpt('enable_zone_routing',
-                default=False,
-                help='When True, routing to child zones will occur.')
+enable_zone_routing_opt = cfg.BoolOpt('enable_zone_routing',
+        default=False,
+        help='When True, routing to child zones will occur.')
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(enable_zone_routing_opt)
@@ -310,8 +309,8 @@ class reroute_compute(object):
     def __call__(self, f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
-            collection, context, item_id_or_uuid = \
-                self.get_collection_context_and_id(args, kwargs)
+            _collection_info = self.get_collection_context_and_id(args, kwargs)
+            collection, context, item_id_or_uuid = _collection_info
 
             attempt_reroute = False
             if utils.is_uuid_like(item_id_or_uuid):
@@ -293,9 +293,9 @@ class DistributedScheduler(driver.Scheduler):
         instance_properties = request_spec['instance_properties']
         instance_type = request_spec.get("instance_type", None)
         if not instance_type:
-            msg = _("Scheduler only understands InstanceType-based" \
-                    "provisioning.")
-            raise NotImplementedError(msg)
+            raise NotImplementedError(_("Scheduler only understands "
+                                        "InstanceType-based "
+                                        "provisioning."))
 
         cost_functions = self.get_cost_functions()
         config_options = self._get_configuration_options()
@@ -23,10 +23,9 @@ from nova.scheduler.filters import abstract_filter
 
 LOG = logging.getLogger('nova.scheduler.filter.core_filter')
 
-cpu_allocation_ratio_opt = \
-    cfg.FloatOpt('cpu_allocation_ratio',
-                 default=16.0,
-                 help='Virtual CPU to Physical CPU allocation ratio')
+cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
+        default=16.0,
+        help='Virtual CPU to Physical CPU allocation ratio')
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(cpu_allocation_ratio_opt)
@@ -21,10 +21,9 @@ from nova.scheduler.filters import abstract_filter
 
 LOG = logging.getLogger('nova.scheduler.filter.ram_filter')
 
-ram_allocation_ratio_opt = \
-    cfg.FloatOpt("ram_allocation_ratio",
-                 default=1.0,
-                 help="virtual ram to physical ram allocation ratio")
+ram_allocation_ratio_opt = cfg.FloatOpt("ram_allocation_ratio",
+        default=1.0,
+        help="virtual ram to physical ram allocation ratio")
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(ram_allocation_ratio_opt)
@@ -145,8 +145,8 @@ class HostState(object):
         return True
 
     def __repr__(self):
-        return "host '%s': free_ram_mb:%s free_disk_mb:%s" % \
-                (self.host, self.free_ram_mb, self.free_disk_mb)
+        return ("host '%s': free_ram_mb:%s free_disk_mb:%s" %
+                (self.host, self.free_ram_mb, self.free_disk_mb))
 
 
 class HostManager(object):
@@ -275,8 +275,8 @@ class HostManager(object):
         """Check if host service capabilites are not recent enough."""
         allowed_time_diff = FLAGS.periodic_interval * 3
         caps = self.service_states[host][service]
-        if (utils.utcnow() - caps["timestamp"]) <= \
-                datetime.timedelta(seconds=allowed_time_diff):
+        if ((utils.utcnow() - caps["timestamp"]) <=
+                datetime.timedelta(seconds=allowed_time_diff)):
             return False
         return True
 
@@ -36,10 +36,9 @@ from nova import utils
 
 LOG = logging.getLogger('nova.scheduler.manager')
 
-scheduler_driver_opt = \
-    cfg.StrOpt('scheduler_driver',
-               default='nova.scheduler.multi.MultiScheduler',
-               help='Default driver to use for the scheduler')
+scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
+        default='nova.scheduler.multi.MultiScheduler',
+        help='Default driver to use for the scheduler')
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(scheduler_driver_opt)
@@ -29,10 +29,10 @@ from nova import log as logging
 from nova.openstack.common import cfg
 
 
-scheduler_json_config_location_opt = \
-    cfg.StrOpt('scheduler_json_config_location',
-               default='',
-               help='Absolute path to scheduler configuration JSON file.')
+scheduler_json_config_location_opt = cfg.StrOpt(
+        'scheduler_json_config_location',
+        default='',
+        help='Absolute path to scheduler configuration JSON file.')
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(scheduler_json_config_location_opt)
@@ -92,8 +92,8 @@ class SchedulerOptions(object):
             return self.data
 
         last_modified = self._get_file_timestamp(filename)
-        if not last_modified or not self.last_modified or \
-                last_modified > self.last_modified:
+        if (not last_modified or not self.last_modified or
+                last_modified > self.last_modified):
             self.data = self._load_file(self._get_file_handle(filename))
             self.last_modified = last_modified
         if not self.data:
@@ -82,8 +82,8 @@ class SimpleScheduler(chance.ChanceScheduler):
             if service['host'] in FLAGS.isolated_hosts and not in_isolation:
                 # images that aren't isolated only run on general hosts
                 continue
-            if check_cores and \
-                    instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
+            if (check_cores and
+                    instance_cores + instance_opts['vcpus'] > FLAGS.max_cores):
                 msg = _("Not enough allocatable CPU cores remaining")
                 raise exception.NoValidHost(reason=msg)
             if utils.service_is_up(service) and not service['disabled']:
@@ -80,11 +80,10 @@ class VsaScheduler(simple.SimpleScheduler):
         def _compare_sizes_approxim(cap_capacity, size):
             cap_capacity = BYTES_TO_GB(int(cap_capacity))
             size = int(size)
-            size_perc = size * \
-                FLAGS.drive_type_approx_capacity_percent / 100
+            size_perc = size * FLAGS.drive_type_approx_capacity_percent / 100
 
-            return cap_capacity >= size - size_perc and \
-                cap_capacity <= size + size_perc
+            return (cap_capacity >= size - size_perc and
+                    cap_capacity <= size + size_perc)
 
         # Add more entries for additional comparisons
         compare_list = [{'cap1': 'DriveType',
@@ -95,11 +94,11 @@ class VsaScheduler(simple.SimpleScheduler):
                          'cmp_func': _compare_sizes_approxim}]
 
         for cap in compare_list:
-            if cap['cap1'] in qos_values.keys() and \
-                    cap['cap2'] in drive_type.keys() and \
-                    cap['cmp_func'] is not None and \
-                    cap['cmp_func'](qos_values[cap['cap1']],
-                                    drive_type[cap['cap2']]):
+            if (cap['cap1'] in qos_values.keys() and
+                    cap['cap2'] in drive_type.keys() and
+                    cap['cmp_func'] is not None and
+                    cap['cmp_func'](qos_values[cap['cap1']],
+                                    drive_type[cap['cap2']])):
                 pass
             else:
                 return False
@@ -209,7 +208,7 @@ class VsaScheduler(simple.SimpleScheduler):
             size = vol['size']
             host = vol['host']
             name = vol['name']
-            LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
+            LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "
                         "host %(host)s"), locals())
 
             volume_ref = db.volume_create(context.elevated(), options)
@@ -253,8 +252,8 @@ class VsaScheduler(simple.SimpleScheduler):
             volume_type_id = vol['volume_type_id']
             request_spec['size'] = vol['size']
 
-            if prev_volume_type_id is None or\
-                    prev_volume_type_id != volume_type_id:
+            if (prev_volume_type_id is None or
+                    prev_volume_type_id != volume_type_id):
                 # generate list of hosts for this drive type
 
                 volume_type = volume_types.get_volume_type(context,
@@ -323,8 +322,8 @@ class VsaScheduler(simple.SimpleScheduler):
         if volume_type_id:
             volume_type = volume_types.get_volume_type(context, volume_type_id)
 
-        if volume_type_id is None or\
-                volume_types.is_vsa_volume(volume_type_id, volume_type):
+        if (volume_type_id is None or
+                volume_types.is_vsa_volume(volume_type_id, volume_type)):
 
             LOG.debug(_("Non-VSA volume %d"), volume_ref['id'])
             return super(VsaScheduler, self).schedule_create_volume(context,
@@ -339,7 +338,7 @@ class VsaScheduler(simple.SimpleScheduler):
                 'rpm': volume_type['extra_specs'].get('drive_rpm'),
                 }
 
-        LOG.debug(_("Spawning volume %(volume_id)s with drive type "\
+        LOG.debug(_("Spawning volume %(volume_id)s with drive type "
                     "%(drive_type)s"), locals())
 
         request_spec = {'size': volume_ref['size'],
@@ -371,23 +370,21 @@ class VsaScheduler(simple.SimpleScheduler):
                 partition_size = size
             part_per_drive = qos_values['DriveCapacity'] / partition_size
 
-            if direction == -1 and \
-                    qos_values['PartitionDrive']['NumFreePartitions'] == 0:
+            if (direction == -1 and
+                    qos_values['PartitionDrive']['NumFreePartitions'] == 0):
 
                 self._consume_full_drive(qos_values, direction)
-                qos_values['PartitionDrive']['NumFreePartitions'] += \
-                    part_per_drive
+                qos_values['PartitionDrive']['NumFreePartitions'] += part_per_drive
 
             qos_values['PartitionDrive']['NumFreePartitions'] += direction
             qos_values['PartitionDrive']['NumOccupiedPartitions'] -= direction
 
-            if direction == 1 and \
-                    qos_values['PartitionDrive']['NumFreePartitions'] >= \
-                    part_per_drive:
+            if (direction == 1 and
+                    qos_values['PartitionDrive']['NumFreePartitions'] >=
+                    part_per_drive):
 
                 self._consume_full_drive(qos_values, direction)
-                qos_values['PartitionDrive']['NumFreePartitions'] -= \
-                    part_per_drive
+                qos_values['PartitionDrive']['NumFreePartitions'] -= part_per_drive
 
     def _consume_resource(self, qos_values, size, direction):
         if qos_values is None:
@@ -396,8 +393,8 @@ class VsaScheduler(simple.SimpleScheduler):
             return
 
         if size == 0:  # full drive match
-            qos_values['AvailableCapacity'] += direction * \
-                qos_values['DriveCapacity']
+            qos_values['AvailableCapacity'] += (direction *
+                                                qos_values['DriveCapacity'])
             self._consume_full_drive(qos_values, direction)
         else:
             qos_values['AvailableCapacity'] += direction * GB_TO_BYTES(size)
@@ -420,8 +417,8 @@ class VsaScheduler(simple.SimpleScheduler):
             free = qos_values['FullDrive']['NumFreeDrives']
             avail = BYTES_TO_GB(qos_values['AvailableCapacity'])
 
-            LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "\
-                       "used %(used)2s, free %(free)2s. Available "\
+            LOG.info(_("\tDrive %(qosgrp)-25s: total %(total)2s, "
+                       "used %(used)2s, free %(free)2s. Available "
                        "capacity %(avail)-5s"), locals())
 
 
@@ -449,8 +446,8 @@ class VsaSchedulerLeastUsedHost(VsaScheduler):
         used_capacity = 0
         for qosgrp, qos_values in capabilities.iteritems():
 
-            used_capacity = used_capacity + qos_values['TotalCapacity'] \
-                - qos_values['AvailableCapacity']
+            used_capacity = (used_capacity + qos_values['TotalCapacity'] -
+                             qos_values['AvailableCapacity'])
 
             if self._qosgrp_match(drive_type, qos_values):
                 # we found required qosgroup
@@ -462,20 +459,18 @@ class VsaSchedulerLeastUsedHost(VsaScheduler):
                     else:
                         break
                 else:
-                    if qos_values['AvailableCapacity'] >= size and \
-                       (qos_values['PartitionDrive'][
-                                        'NumFreePartitions'] > 0 or \
-                        qos_values['FullDrive']['NumFreeDrives'] > 0):
+                    _fp = qos_values['PartitionDrive']['NumFreePartitions']
+                    _fd = qos_values['FullDrive']['NumFreeDrives']
+                    if (qos_values['AvailableCapacity'] >= size and
+                            (_fp > 0 or _fd > 0)):
                         has_enough_capacity = True
                         matched_qos = qos_values
                     else:
                         break
 
-            if has_enough_capacity and \
-               self._allowed_to_use_host(host,
-                                         selected_hosts,
-                                         unique) and \
-               (best_host is None or used_capacity < min_used):
+            if (has_enough_capacity and
+                    self._allowed_to_use_host(host, selected_hosts, unique) and
+                    (best_host is None or used_capacity < min_used)):
 
                 min_used = used_capacity
                 best_host = host
@@ -485,7 +480,7 @@ class VsaSchedulerLeastUsedHost(VsaScheduler):
         if best_host:
             self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
             min_used = BYTES_TO_GB(min_used)
-            LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "\
+            LOG.debug(_("\t LeastUsedHost: Best host: %(best_host)s. "
                         "(used capacity %(min_used)s)"), locals())
         return (best_host, best_qoscap)
 
@@ -518,10 +513,9 @@ class VsaSchedulerMostAvailCapacity(VsaScheduler):
             else:
                 available = qos_values['AvailableCapacity']
 
-            if available > max_avail and \
-               self._allowed_to_use_host(host,
-                                         selected_hosts,
-                                         unique):
+            if (available > max_avail and
+                    self._allowed_to_use_host(host, selected_hosts,
+                                              unique)):
                 max_avail = available
                 best_host = host
                 best_qoscap = qos_values
@@ -531,7 +525,7 @@ class VsaSchedulerMostAvailCapacity(VsaScheduler):
         if best_host:
             self._add_hostcap_to_list(selected_hosts, best_host, best_cap)
             type_str = "drives" if size == 0 else "bytes"
-            LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "\
+            LOG.debug(_("\t MostAvailCap: Best host: %(best_host)s. "
                         "(available %(max_avail)s %(type_str)s)"), locals())
 
         return (best_host, best_qoscap)