Switch from FLAGS to CONF in nova.scheduler
Use the global CONF variable instead of FLAGS. This is purely a cleanup since FLAGS is already just another reference to CONF. We leave the nova.flags imports until a later cleanup commit since removing them may cause unpredictable problems due to config options not being registered.

Change-Id: Ia3f66aec300fd88e91cef926da735f9e95aa2cc2
commit 9e1d5467f6
parent 45af1f23a4
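Both names refer to the same global option object: nova.flags.FLAGS is just another reference to nova.config.CONF, so each change below is mechanical. As a minimal sketch of the pattern being applied (assuming the nova layout of this era, with nova.config exposing CONF and nova.openstack.common.cfg providing the option types; the option name example_max_attempts is made up purely for illustration and is not part of nova):

    # Illustrative sketch only; 'example_max_attempts' is a made-up option name.
    from nova import config
    from nova import flags  # left in place so option-registration side effects still run
    from nova.openstack.common import cfg

    example_opt = cfg.IntOpt('example_max_attempts', default=3,
                             help='Made-up option, shown only to illustrate the pattern')

    # Old style: register and read the option through FLAGS.
    # FLAGS = flags.FLAGS
    # FLAGS.register_opt(example_opt)
    # max_attempts = FLAGS.example_max_attempts

    # New style: the same object, reached through the global CONF reference.
    CONF = config.CONF
    CONF.register_opt(example_opt)
    max_attempts = CONF.example_max_attempts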
@@ -23,11 +23,12 @@ Chance (Random) Scheduler implementation

 import random

+from nova import config
 from nova import exception
 from nova import flags
 from nova.scheduler import driver

-FLAGS = flags.FLAGS
+CONF = config.CONF


 class ChanceScheduler(driver.Scheduler):
@@ -65,7 +66,7 @@ class ChanceScheduler(driver.Scheduler):
         for num, instance_uuid in enumerate(instance_uuids):
             request_spec['instance_properties']['launch_index'] = num
             try:
-                host = self._schedule(context, FLAGS.compute_topic,
+                host = self._schedule(context, CONF.compute_topic,
                                       request_spec, filter_properties)
                 updated_instance = driver.instance_update_db(context,
                         instance_uuid)
@@ -88,7 +89,7 @@ class ChanceScheduler(driver.Scheduler):
                              filter_properties, instance, instance_type,
                              reservations):
         """Select a target for resize."""
-        host = self._schedule(context, FLAGS.compute_topic, request_spec,
+        host = self._schedule(context, CONF.compute_topic, request_spec,
                               filter_properties)
         self.compute_rpcapi.prep_resize(context, image, instance,
                 instance_type, host, reservations)
@@ -53,8 +53,8 @@ scheduler_driver_opts = [
                help='Maximum number of attempts to schedule an instance'),
     ]

-FLAGS = flags.FLAGS
-FLAGS.register_opts(scheduler_driver_opts)
+CONF = config.CONF
+CONF.register_opts(scheduler_driver_opts)

 CONF = config.CONF
 CONF.import_opt('instances_path', 'nova.compute.manager')
@@ -107,7 +107,7 @@ def cast_to_compute_host(context, host, method, **kwargs):
         instance_update_db(context, instance_uuid)

     rpc.cast(context,
-            rpc.queue_get_for(context, FLAGS.compute_topic, host),
+            rpc.queue_get_for(context, CONF.compute_topic, host),
             {"method": method, "args": kwargs})
     LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())

@@ -115,7 +115,7 @@ def cast_to_compute_host(context, host, method, **kwargs):
 def cast_to_host(context, topic, host, method, **kwargs):
     """Generic cast to host"""

-    topic_mapping = {FLAGS.compute_topic: cast_to_compute_host}
+    topic_mapping = {CONF.compute_topic: cast_to_compute_host}

     func = topic_mapping.get(topic)
     if func:
@@ -151,7 +151,7 @@ class Scheduler(object):

     def __init__(self):
         self.host_manager = importutils.import_object(
-                FLAGS.scheduler_host_manager)
+                CONF.scheduler_host_manager)
         self.compute_api = compute_api.API()
         self.compute_rpcapi = compute_rpcapi.ComputeAPI()

@@ -21,6 +21,7 @@ Weighing Functions.

 import operator

+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import importutils
@@ -30,8 +31,7 @@ from nova.scheduler import driver
 from nova.scheduler import least_cost
 from nova.scheduler import scheduler_options

-
-FLAGS = flags.FLAGS
+CONF = config.CONF
 LOG = logging.getLogger(__name__)


@@ -61,7 +61,7 @@ class FilterScheduler(driver.Scheduler):
         notifier.notify(context, notifier.publisher_id("scheduler"),
                         'scheduler.run_instance.start', notifier.INFO, payload)

-        weighted_hosts = self._schedule(context, FLAGS.compute_topic,
+        weighted_hosts = self._schedule(context, CONF.compute_topic,
                                         request_spec, filter_properties,
                                         instance_uuids)

@@ -108,7 +108,7 @@ class FilterScheduler(driver.Scheduler):
         the prep_resize operation to it.
         """

-        hosts = self._schedule(context, FLAGS.compute_topic, request_spec,
+        hosts = self._schedule(context, CONF.compute_topic, request_spec,
                                filter_properties, [instance['uuid']])
         if not hosts:
             raise exception.NoValidHost(reason="")
@@ -187,7 +187,7 @@ class FilterScheduler(driver.Scheduler):
             filter_properties['os_type'] = os_type

     def _max_attempts(self):
-        max_attempts = FLAGS.scheduler_max_attempts
+        max_attempts = CONF.scheduler_max_attempts
         if max_attempts < 1:
             raise exception.NovaException(_("Invalid value for "
                     "'scheduler_max_attempts', must be >= 1"))
@@ -226,7 +226,7 @@ class FilterScheduler(driver.Scheduler):
         ordered by their fitness.
         """
         elevated = context.elevated()
-        if topic != FLAGS.compute_topic:
+        if topic != CONF.compute_topic:
             msg = _("Scheduler only understands Compute nodes (for now)")
             raise NotImplementedError(msg)

@@ -306,12 +306,12 @@ class FilterScheduler(driver.Scheduler):
         """
         if topic is None:
             # Schedulers only support compute right now.
-            topic = FLAGS.compute_topic
+            topic = CONF.compute_topic
         if topic in self.cost_function_cache:
             return self.cost_function_cache[topic]

         cost_fns = []
-        for cost_fn_str in FLAGS.least_cost_functions:
+        for cost_fn_str in CONF.least_cost_functions:
             if '.' in cost_fn_str:
                 short_name = cost_fn_str.split('.')[-1]
             else:
@@ -333,7 +333,7 @@ class FilterScheduler(driver.Scheduler):

             try:
                 flag_name = "%s_weight" % cost_fn.__name__
-                weight = getattr(FLAGS, flag_name)
+                weight = getattr(CONF, flag_name)
             except AttributeError:
                 raise exception.SchedulerWeightFlagNotFound(
                         flag_name=flag_name)
@@ -13,12 +13,13 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import config
 from nova import flags
 from nova.openstack.common import log as logging
 from nova.scheduler import filters
 from nova import utils

-FLAGS = flags.FLAGS
+CONF = config.CONF

 LOG = logging.getLogger(__name__)

@@ -29,7 +30,7 @@ class ComputeFilter(filters.BaseHostFilter):
     def host_passes(self, host_state, filter_properties):
         """Returns True for only active compute nodes"""
         instance_type = filter_properties.get('instance_type')
-        if host_state.topic != FLAGS.compute_topic or not instance_type:
+        if host_state.topic != CONF.compute_topic or not instance_type:
             return True
         capabilities = host_state.capabilities
         service = host_state.service
@@ -15,6 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -27,8 +28,8 @@ cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio',
         default=16.0,
         help='Virtual CPU to Physical CPU allocation ratio')

-FLAGS = flags.FLAGS
-FLAGS.register_opt(cpu_allocation_ratio_opt)
+CONF = config.CONF
+CONF.register_opt(cpu_allocation_ratio_opt)


 class CoreFilter(filters.BaseHostFilter):
@@ -37,7 +38,7 @@ class CoreFilter(filters.BaseHostFilter):
     def host_passes(self, host_state, filter_properties):
         """Return True if host has sufficient CPU cores."""
         instance_type = filter_properties.get('instance_type')
-        if host_state.topic != FLAGS.compute_topic or not instance_type:
+        if host_state.topic != CONF.compute_topic or not instance_type:
             return True

         if not host_state.vcpus_total:
@@ -46,7 +47,7 @@ class CoreFilter(filters.BaseHostFilter):
             return True

         instance_vcpus = instance_type['vcpus']
-        vcpus_total = host_state.vcpus_total * FLAGS.cpu_allocation_ratio
+        vcpus_total = host_state.vcpus_total * CONF.cpu_allocation_ratio

         # Only provide a VCPU limit to compute if the virt driver is reporting
         # an accurate count of installed VCPUs. (XenServer driver does not)
@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -23,8 +24,8 @@ LOG = logging.getLogger(__name__)
 disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio", default=1.0,
         help="virtual disk to physical disk allocation ratio")

-FLAGS = flags.FLAGS
-FLAGS.register_opt(disk_allocation_ratio_opt)
+CONF = config.CONF
+CONF.register_opt(disk_allocation_ratio_opt)


 class DiskFilter(filters.BaseHostFilter):
@@ -39,7 +40,7 @@ class DiskFilter(filters.BaseHostFilter):
         free_disk_mb = host_state.free_disk_mb
         total_usable_disk_mb = host_state.total_usable_disk_gb * 1024

-        disk_mb_limit = total_usable_disk_mb * FLAGS.disk_allocation_ratio
+        disk_mb_limit = total_usable_disk_mb * CONF.disk_allocation_ratio
         used_disk_mb = total_usable_disk_mb - free_disk_mb
         usable_disk_mb = disk_mb_limit - used_disk_mb

@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -24,8 +25,8 @@ max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
         default=8,
         help="Ignore hosts that have too many builds/resizes/snaps/migrations")

-FLAGS = flags.FLAGS
-FLAGS.register_opt(max_io_ops_per_host_opt)
+CONF = config.CONF
+CONF.register_opt(max_io_ops_per_host_opt)


 class IoOpsFilter(filters.BaseHostFilter):
@@ -36,7 +37,7 @@ class IoOpsFilter(filters.BaseHostFilter):
         compute node statistics to decide whether to filter.
         """
         num_io_ops = host_state.num_io_ops
-        max_io_ops = FLAGS.max_io_ops_per_host
+        max_io_ops = CONF.max_io_ops_per_host
         passes = num_io_ops < max_io_ops
         if not passes:
             LOG.debug(_("%(host_state)s fails I/O ops check: Max IOs per host "
@@ -13,12 +13,11 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-
+from nova import config
 from nova import flags
 from nova.scheduler import filters

-
-FLAGS = flags.FLAGS
+CONF = config.CONF


 class IsolatedHostsFilter(filters.BaseHostFilter):
@@ -28,6 +27,6 @@ class IsolatedHostsFilter(filters.BaseHostFilter):
         spec = filter_properties.get('request_spec', {})
         props = spec.get('instance_properties', {})
         image_ref = props.get('image_ref')
-        image_isolated = image_ref in FLAGS.isolated_images
-        host_isolated = host_state.host in FLAGS.isolated_hosts
+        image_isolated = image_ref in CONF.isolated_images
+        host_isolated = host_state.host in CONF.isolated_hosts
         return image_isolated == host_isolated
@@ -13,6 +13,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -24,8 +25,8 @@ max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
         default=50,
         help="Ignore hosts that have too many instances")

-FLAGS = flags.FLAGS
-FLAGS.register_opt(max_instances_per_host_opt)
+CONF = config.CONF
+CONF.register_opt(max_instances_per_host_opt)


 class NumInstancesFilter(filters.BaseHostFilter):
@@ -33,7 +34,7 @@ class NumInstancesFilter(filters.BaseHostFilter):

     def host_passes(self, host_state, filter_properties):
         num_instances = host_state.num_instances
-        max_instances = FLAGS.max_instances_per_host
+        max_instances = CONF.max_instances_per_host
         passes = num_instances < max_instances
         if not passes:
             LOG.debug(_("%(host_state)s fails num_instances check: Max "
@@ -14,6 +14,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -25,8 +26,8 @@ ram_allocation_ratio_opt = cfg.FloatOpt("ram_allocation_ratio",
         default=1.5,
         help="virtual ram to physical ram allocation ratio")

-FLAGS = flags.FLAGS
-FLAGS.register_opt(ram_allocation_ratio_opt)
+CONF = config.CONF
+CONF.register_opt(ram_allocation_ratio_opt)


 class RamFilter(filters.BaseHostFilter):
@@ -39,7 +40,7 @@ class RamFilter(filters.BaseHostFilter):
         free_ram_mb = host_state.free_ram_mb
         total_usable_ram_mb = host_state.total_usable_ram_mb

-        memory_mb_limit = total_usable_ram_mb * FLAGS.ram_allocation_ratio
+        memory_mb_limit = total_usable_ram_mb * CONF.ram_allocation_ratio
         used_ram_mb = total_usable_ram_mb - free_ram_mb
         usable_ram = memory_mb_limit - used_ram_mb
         if not usable_ram >= requested_ram:
@@ -48,6 +48,7 @@ import httplib
 import socket
 import ssl

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import jsonutils
@@ -81,10 +82,10 @@ trusted_opts = [
                help='attestation authorization blob - must change'),
     ]

-FLAGS = flags.FLAGS
+CONF = config.CONF
 trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters')
-FLAGS.register_group(trust_group)
-FLAGS.register_opts(trusted_opts, group='trusted_computing')
+CONF.register_group(trust_group)
+CONF.register_opts(trusted_opts, group=trust_group)


 class HTTPSClientAuthConnection(httplib.HTTPSConnection):
@@ -124,13 +125,13 @@ class AttestationService(httplib.HTTPSConnection):
     # Provide access wrapper to attestation server to get integrity report.

     def __init__(self):
-        self.api_url = FLAGS.trusted_computing.attestation_api_url
-        self.host = FLAGS.trusted_computing.attestation_server
-        self.port = FLAGS.trusted_computing.attestation_port
-        self.auth_blob = FLAGS.trusted_computing.attestation_auth_blob
+        self.api_url = CONF.trusted_computing.attestation_api_url
+        self.host = CONF.trusted_computing.attestation_server
+        self.port = CONF.trusted_computing.attestation_port
+        self.auth_blob = CONF.trusted_computing.attestation_auth_blob
         self.key_file = None
         self.cert_file = None
-        self.ca_file = FLAGS.trusted_computing.attestation_server_ca_file
+        self.ca_file = CONF.trusted_computing.attestation_server_ca_file
         self.request_count = 100

     def _do_request(self, method, action_url, body, headers):
@@ -21,6 +21,7 @@ import UserDict

 from nova.compute import task_states
 from nova.compute import vm_states
+from nova import config
 from nova import db
 from nova import exception
 from nova import flags
@@ -49,8 +50,8 @@ host_manager_opts = [
                     'when not specified in the request.'),
     ]

-FLAGS = flags.FLAGS
-FLAGS.register_opts(host_manager_opts)
+CONF = config.CONF
+CONF.register_opts(host_manager_opts)

 LOG = logging.getLogger(__name__)

@@ -281,7 +282,7 @@ class HostManager(object):
         self.service_states = {}  # { <host> : { <service> : { cap k : v }}}
         self.host_state_map = {}
         self.filter_classes = filters.get_filter_classes(
-                FLAGS.scheduler_available_filters)
+                CONF.scheduler_available_filters)

     def _choose_host_filters(self, filters):
         """Since the caller may specify which filters to use we need
@@ -290,7 +291,7 @@ class HostManager(object):
         of acceptable filters.
         """
         if filters is None:
-            filters = FLAGS.scheduler_default_filters
+            filters = CONF.scheduler_default_filters
         if not isinstance(filters, (list, tuple)):
             filters = [filters]
         good_filters = []
@@ -347,7 +348,7 @@ class HostManager(object):
         with the instance (in case the InstanceType changed since the
         instance was created)."""

-        if topic != FLAGS.compute_topic:
+        if topic != CONF.compute_topic:
             raise NotImplementedError(_(
                 "host_manager only implemented for 'compute'"))

@@ -22,6 +22,7 @@ The cost-function and weights are tabulated, and the host with the least cost
 is then selected for provisioning.
 """

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -45,8 +46,8 @@ least_cost_opts = [
                     'e.g. spread-first'),
     ]

-FLAGS = flags.FLAGS
-FLAGS.register_opts(least_cost_opts)
+CONF = config.CONF
+CONF.register_opts(least_cost_opts)

 # TODO(sirp): Once we have enough of these rules, we can break them out into a
 # cost_functions.py file (perhaps in a least_cost_scheduler directory)
@@ -26,6 +26,7 @@ import sys
 from nova.compute import rpcapi as compute_rpcapi
 from nova.compute import utils as compute_utils
 from nova.compute import vm_states
+from nova import config
 import nova.context
 from nova import db
 from nova import exception
@@ -46,8 +47,8 @@ scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
         default='nova.scheduler.filter_scheduler.FilterScheduler',
         help='Default driver to use for the scheduler')

-FLAGS = flags.FLAGS
-FLAGS.register_opt(scheduler_driver_opt)
+CONF = config.CONF
+CONF.register_opt(scheduler_driver_opt)

 QUOTAS = quota.QUOTAS

@@ -59,7 +60,7 @@ class SchedulerManager(manager.Manager):

     def __init__(self, scheduler_driver=None, *args, **kwargs):
         if not scheduler_driver:
-            scheduler_driver = FLAGS.scheduler_driver
+            scheduler_driver = CONF.scheduler_driver
         self.driver = importutils.import_object(scheduler_driver)
         super(SchedulerManager, self).__init__(*args, **kwargs)

@@ -27,6 +27,7 @@ schedule requests to compute nodes but provide their own manager and topic.
 https://bugs.launchpad.net/nova/+bug/1009681
 """

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import importutils
@@ -43,8 +44,8 @@ multi_scheduler_opts = [
                help='Default driver to use for scheduling calls'),
     ]

-FLAGS = flags.FLAGS
-FLAGS.register_opts(multi_scheduler_opts)
+CONF = config.CONF
+CONF.register_opts(multi_scheduler_opts)


 class MultiScheduler(driver.Scheduler):
@@ -58,9 +59,9 @@ class MultiScheduler(driver.Scheduler):
     def __init__(self):
         super(MultiScheduler, self).__init__()
         compute_driver = importutils.import_object(
-                FLAGS.compute_scheduler_driver)
+                CONF.compute_scheduler_driver)
         default_driver = importutils.import_object(
-                FLAGS.default_scheduler_driver)
+                CONF.default_scheduler_driver)

         self.drivers = {'compute': compute_driver,
                         'default': default_driver}
@@ -18,12 +18,12 @@
 Client side of the scheduler manager RPC API.
 """

+from nova import config
 from nova import flags
 from nova.openstack.common import jsonutils
 import nova.openstack.common.rpc.proxy

-
-FLAGS = flags.FLAGS
+CONF = config.CONF


 class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
@@ -60,7 +60,7 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     BASE_RPC_API_VERSION = '2.0'

     def __init__(self):
-        super(SchedulerAPI, self).__init__(topic=FLAGS.scheduler_topic,
+        super(SchedulerAPI, self).__init__(topic=CONF.scheduler_topic,
                 default_version=self.BASE_RPC_API_VERSION)

     def run_instance(self, ctxt, request_spec, admin_password,
@@ -26,6 +26,7 @@ import datetime
 import json
 import os

+from nova import config
 from nova import flags
 from nova.openstack.common import cfg
 from nova.openstack.common import log as logging
@@ -37,8 +38,8 @@ scheduler_json_config_location_opt = cfg.StrOpt(
         default='',
         help='Absolute path to scheduler configuration JSON file.')

-FLAGS = flags.FLAGS
-FLAGS.register_opt(scheduler_json_config_location_opt)
+CONF = config.CONF
+CONF.register_opt(scheduler_json_config_location_opt)

 LOG = logging.getLogger(__name__)

@@ -86,7 +87,7 @@ class SchedulerOptions(object):
     def get_configuration(self, filename=None):
         """Check the json file for changes and load it if needed."""
         if not filename:
-            filename = FLAGS.scheduler_json_config_location
+            filename = CONF.scheduler_json_config_location
         if not filename:
             return self.data
         if self.last_checked: