Switch from FLAGS to CONF in nova.compute

Use the global CONF variable instead of FLAGS. This is purely a cleanup
since FLAGS is already just another reference to CONF.
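For context, the aliasing works roughly like this (a minimal sketch of
nova/flags.py, simplified from the real module, which also declares
options):

    # Sketch: FLAGS is just another name bound to the same global
    # ConfigOpts instance that nova.config exposes as CONF, so
    # FLAGS.some_opt and CONF.some_opt read the same registered option.
    from nova import config

    FLAGS = config.CONF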

We leave the nova.flags imports in place until a later cleanup commit,
since removing them now may cause unpredictable problems due to config
options not being registered.
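Option registration happens as an import side effect, which is why the
imports stay for now. A hedged sketch of the pattern, mirroring the
register_opts/import_opt calls visible in the diff below (the option name
is hypothetical, and the vendored nova.openstack.common.cfg path is an
assumption):

    from nova import config
    from nova.openstack.common import cfg

    CONF = config.CONF

    example_opts = [
        # Hypothetical option, for illustration only.
        cfg.IntOpt('example_timeout',
                   default=60,
                   help='Demonstrates attaching an option to CONF'),
    ]
    # Until this module is imported, reading CONF.example_timeout raises
    # NoSuchOptError; a consumer can force the defining module to load
    # first with CONF.import_opt('example_timeout', 'nova.example').
    CONF.register_opts(example_opts)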

Change-Id: I0488b406dcf9f9459810337b62b6932d71a623a9
Author: Mark McLoughlin
Date:   2012-11-04 21:32:47 +00:00
Parent: 22ece8529d
Commit: 9fbc478b5b
7 changed files with 71 additions and 67 deletions

nova/compute/__init__.py

@@ -18,11 +18,12 @@
 # Importing full names to not pollute the namespace and cause possible
 # collisions with use of 'from nova.compute import <foo>' elsewhere.
+import nova.config
 import nova.flags
 import nova.openstack.common.importutils


 def API(*args, **kwargs):
     importutils = nova.openstack.common.importutils
-    cls = importutils.import_class(nova.flags.FLAGS.compute_api_class)
+    cls = importutils.import_class(nova.config.CONF.compute_api_class)
     return cls(*args, **kwargs)

nova/compute/api.py

@@ -59,7 +59,6 @@ from nova import volume
 LOG = logging.getLogger(__name__)

-FLAGS = flags.FLAGS
 CONF = config.CONF
 CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
@@ -140,7 +139,7 @@ class API(base.Base):
         self.network_api = network_api or network.API()
         self.volume_api = volume_api or volume.API()
         self.security_group_api = security_group_api or SecurityGroupAPI()
-        self.sgh = importutils.import_object(FLAGS.security_group_handler)
+        self.sgh = importutils.import_object(CONF.security_group_handler)
         self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
         self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
         self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -310,7 +309,7 @@ class API(base.Base):
         ramdisk_id = image['properties'].get('ramdisk_id')

         # Force to None if using null_kernel
-        if kernel_id == str(FLAGS.null_kernel):
+        if kernel_id == str(CONF.null_kernel):
             kernel_id = None
             ramdisk_id = None
@@ -334,7 +333,7 @@ class API(base.Base):
             availability_zone, forced_host = availability_zone.split(':')

         if not availability_zone:
-            availability_zone = FLAGS.default_schedule_zone
+            availability_zone = CONF.default_schedule_zone

         return availability_zone, forced_host
@@ -1305,7 +1304,7 @@ class API(base.Base):
                 key = key[len(prefix):]

                 # Skip properties that are non-inheritable
-                if key in FLAGS.non_inheritable_image_properties:
+                if key in CONF.non_inheritable_image_properties:
                     continue

                 # By using setdefault, we ensure that the properties set
@@ -1749,7 +1748,7 @@ class API(base.Base):
         filter_properties = {'ignore_hosts': []}

-        if not FLAGS.allow_resize_to_same_host:
+        if not CONF.allow_resize_to_same_host:
             filter_properties['ignore_hosts'].append(instance['host'])

         args = {
@@ -2117,7 +2116,7 @@ class AggregateAPI(base.Base):
"""Creates the model for the aggregate.""" """Creates the model for the aggregate."""
zones = [s.availability_zone for s in zones = [s.availability_zone for s in
self.db.service_get_all_by_topic(context, self.db.service_get_all_by_topic(context,
FLAGS.compute_topic)] CONF.compute_topic)]
if availability_zone in zones: if availability_zone in zones:
values = {"name": aggregate_name, values = {"name": aggregate_name,
"availability_zone": availability_zone} "availability_zone": availability_zone}
@@ -2304,7 +2303,7 @@ class SecurityGroupAPI(base.Base):
     def __init__(self, **kwargs):
         super(SecurityGroupAPI, self).__init__(**kwargs)
         self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
-        self.sgh = importutils.import_object(FLAGS.security_group_handler)
+        self.sgh = importutils.import_object(CONF.security_group_handler)

     def validate_property(self, value, property, allowed):
         """

nova/compute/instance_types.py

@@ -22,6 +22,7 @@
 import re

+from nova import config
 from nova import context
 from nova import db
 from nova import exception
@@ -29,7 +30,7 @@ from nova import flags
 from nova.openstack.common import log as logging
 from nova import utils

-FLAGS = flags.FLAGS
+CONF = config.CONF
 LOG = logging.getLogger(__name__)

 INVALID_NAME_REGEX = re.compile("[^\w\.\- ]")
@@ -129,7 +130,7 @@ get_all_flavors = get_all_types
 def get_default_instance_type():
     """Get the default instance type."""
-    name = FLAGS.default_instance_type
+    name = CONF.default_instance_type
     return get_instance_type_by_name(name)

nova/compute/manager.py

@@ -52,6 +52,7 @@ from nova.compute import rpcapi as compute_rpcapi
 from nova.compute import task_states
 from nova.compute import utils as compute_utils
 from nova.compute import vm_states
+from nova import config
 import nova.context
 from nova import exception
 from nova import flags
@@ -140,8 +141,8 @@ compute_opts = [
help="Generate periodic compute.instance.exists notifications"), help="Generate periodic compute.instance.exists notifications"),
] ]
FLAGS = flags.FLAGS CONF = config.CONF
FLAGS.register_opts(compute_opts) CONF.register_opts(compute_opts)
QUOTAS = quota.QUOTAS QUOTAS = quota.QUOTAS
@@ -237,7 +238,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # TODO(vish): sync driver creation logic with the rest of the system
         #             and re-document the module docstring
         if not compute_driver:
-            compute_driver = FLAGS.compute_driver
+            compute_driver = CONF.compute_driver

         if not compute_driver:
             LOG.error(_("Compute driver option required, but not specified"))
@@ -258,7 +259,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self.network_api = network.API()
         self.volume_api = volume.API()
         self.network_manager = importutils.import_object(
-            FLAGS.network_manager, host=kwargs.get('host', None))
+            CONF.network_manager, host=kwargs.get('host', None))
         self._last_host_check = 0
         self._last_bw_usage_poll = 0
         self._last_info_cache_heal = 0
@@ -306,7 +307,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         context = nova.context.get_admin_context()
         instances = self.db.instance_get_all_by_host(context, self.host)

-        if FLAGS.defer_iptables_apply:
+        if CONF.defer_iptables_apply:
             self.driver.filter_defer_apply_on()

         try:
@@ -335,8 +336,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                 legacy_net_info = self._legacy_nw_info(net_info)
                 self.driver.plug_vifs(instance, legacy_net_info)

-            if ((expect_running and FLAGS.resume_guests_state_on_host_boot)
-                 or FLAGS.start_guests_on_host_boot):
+            if ((expect_running and CONF.resume_guests_state_on_host_boot)
+                 or CONF.start_guests_on_host_boot):
                 LOG.info(
                     _('Rebooting instance after nova-compute restart.'),
                     locals(), instance=instance)
@@ -366,7 +367,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                           'firewall rules'), instance=instance)

         finally:
-            if FLAGS.defer_iptables_apply:
+            if CONF.defer_iptables_apply:
                 self.driver.filter_defer_apply_off()

         self._report_driver_status(context)
@@ -395,8 +396,8 @@ class ComputeManager(manager.SchedulerDependentManager):
""" """
#TODO(mdragon): perhaps make this variable by console_type? #TODO(mdragon): perhaps make this variable by console_type?
return rpc.queue_get_for(context, return rpc.queue_get_for(context,
FLAGS.console_topic, CONF.console_topic,
FLAGS.console_host) CONF.console_host)
def get_console_pool_info(self, context, console_type): def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type) return self.driver.get_console_pool_info(console_type)
@@ -644,7 +645,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     @manager.periodic_task
     def _check_instance_build_time(self, context):
         """Ensure that instances are not stuck in build."""
-        timeout = FLAGS.instance_build_timeout
+        timeout = CONF.instance_build_timeout
         if timeout == 0:
             return
@@ -660,13 +661,13 @@ class ComputeManager(manager.SchedulerDependentManager):
     def _update_access_ip(self, context, instance, nw_info):
         """Update the access ip values for a given instance.

-        If FLAGS.default_access_ip_network_name is set, this method will
+        If CONF.default_access_ip_network_name is set, this method will
         grab the corresponding network and set the access ip values
         accordingly. Note that when there are multiple ips to choose from,
         an arbitrary one will be chosen.
         """

-        network_name = FLAGS.default_access_ip_network_name
+        network_name = CONF.default_access_ip_network_name
         if not network_name:
             return
@@ -758,7 +759,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 vm_state=vm_states.BUILDING,
                 task_state=task_states.NETWORKING,
                 expected_task_state=None)
-        is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
+        is_vpn = instance['image_ref'] == str(CONF.vpn_image_id)
         try:
             # allocate and get network info
             network_info = self.network_api.allocate_for_instance(
@@ -816,7 +817,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                                     extra_usage_info=None):
         # NOTE(sirp): The only thing this wrapper function does extra is handle
         # the passing in of `self.host`. Ordinarily this will just be
-        # `FLAGS.host`, but `Manager`'s gets a chance to override this in its
+        # `CONF.host`, but `Manager`'s gets a chance to override this in its
         # `__init__`.
         compute_utils.notify_about_instance_usage(
             context, instance, event_suffix, network_info=network_info,
@@ -1372,7 +1373,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         if new_pass is None:
             # Generate a random password
-            new_pass = utils.generate_password(FLAGS.password_length)
+            new_pass = utils.generate_password(CONF.password_length)

         max_tries = 10
@@ -1461,7 +1462,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.audit(_('Rescuing'), context=context, instance=instance)

         admin_password = (rescue_password if rescue_password else
-                          utils.generate_password(FLAGS.password_length))
+                          utils.generate_password(CONF.password_length))

         network_info = self._get_instance_nw_info(context, instance)
         image_meta = _get_image_meta(context, instance['image_ref'])
@@ -1684,7 +1685,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         try:
             same_host = instance['host'] == self.host
-            if same_host and not FLAGS.allow_resize_to_same_host:
+            if same_host and not CONF.allow_resize_to_same_host:
                 self._set_instance_error_state(context, instance['uuid'])
                 msg = _('destination same as source!')
                 raise exception.MigrationError(msg)
@@ -2125,9 +2126,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         if console_type == 'novnc':
             # For essex, novncproxy_base_url must include the full path
             # including the html file (like http://myhost/vnc_auto.html)
-            access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
+            access_url = '%s?token=%s' % (CONF.novncproxy_base_url, token)
         elif console_type == 'xvpvnc':
-            access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
+            access_url = '%s?token=%s' % (CONF.xvpvncproxy_base_url, token)
         else:
             raise exception.ConsoleTypeInvalid(console_type=console_type)
@@ -2325,7 +2326,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         and None otherwise.
         """
         src_compute_info = self._get_compute_info(ctxt, instance['host'])
-        dst_compute_info = self._get_compute_info(ctxt, FLAGS.host)
+        dst_compute_info = self._get_compute_info(ctxt, CONF.host)
         dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
             instance, src_compute_info, dst_compute_info,
             block_migration, disk_over_commit)
@@ -2632,7 +2633,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         If anything errors, we don't care.  It's possible the instance
         has been deleted, etc.
         """
-        heal_interval = FLAGS.heal_instance_info_cache_interval
+        heal_interval = CONF.heal_instance_info_cache_interval
         if not heal_interval:
             return
         curr_time = time.time()
@@ -2675,25 +2676,25 @@ class ComputeManager(manager.SchedulerDependentManager):
     @manager.periodic_task
     def _poll_rebooting_instances(self, context):
-        if FLAGS.reboot_timeout > 0:
+        if CONF.reboot_timeout > 0:
             instances = self.db.instance_get_all_hung_in_rebooting(
-                context, FLAGS.reboot_timeout)
-            self.driver.poll_rebooting_instances(FLAGS.reboot_timeout,
+                context, CONF.reboot_timeout)
+            self.driver.poll_rebooting_instances(CONF.reboot_timeout,
                                                  instances)

     @manager.periodic_task
     def _poll_rescued_instances(self, context):
-        if FLAGS.rescue_timeout > 0:
-            self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+        if CONF.rescue_timeout > 0:
+            self.driver.poll_rescued_instances(CONF.rescue_timeout)

     @manager.periodic_task
     def _poll_unconfirmed_resizes(self, context):
-        if FLAGS.resize_confirm_window > 0:
+        if CONF.resize_confirm_window > 0:
             migrations = self.db.migration_get_unconfirmed_by_dest_compute(
-                    context, FLAGS.resize_confirm_window, self.host)
+                    context, CONF.resize_confirm_window, self.host)

             migrations_info = dict(migration_count=len(migrations),
-                    confirm_window=FLAGS.resize_confirm_window)
+                    confirm_window=CONF.resize_confirm_window)

             if migrations_info["migration_count"] > 0:
                 LOG.info(_("Found %(migration_count)d unconfirmed migrations "
@@ -2742,7 +2743,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     @manager.periodic_task
     def _instance_usage_audit(self, context):
-        if FLAGS.instance_usage_audit:
+        if CONF.instance_usage_audit:
             if not compute_utils.has_audit_been_run(context, self.host):
                 begin, end = utils.last_completed_audit_period()
                 instances = self.db.instance_get_active_by_window_joined(
@@ -2792,7 +2793,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         curr_time = time.time()
         if (curr_time - self._last_bw_usage_poll >
-                FLAGS.bandwidth_poll_interval):
+                CONF.bandwidth_poll_interval):
             self._last_bw_usage_poll = curr_time
             LOG.info(_("Updating bandwidth usage cache"))
@@ -2858,7 +2859,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     @manager.periodic_task
     def _report_driver_status(self, context):
         curr_time = time.time()
-        if curr_time - self._last_host_check > FLAGS.host_state_interval:
+        if curr_time - self._last_host_check > CONF.host_state_interval:
             self._last_host_check = curr_time
             LOG.info(_("Updating host status"))
             # This will grab info about the host and queue it
@@ -2866,7 +2867,7 @@ class ComputeManager(manager.SchedulerDependentManager):
             capabilities = self.driver.get_host_stats(refresh=True)
             for capability in (capabilities if isinstance(capabilities, list)
                                else [capabilities]):
-                capability['host_ip'] = FLAGS.my_ip
+                capability['host_ip'] = CONF.my_ip
             self.update_service_capabilities(capabilities)

     @manager.periodic_task(ticks_between_runs=10)
@@ -3008,9 +3009,9 @@ class ComputeManager(manager.SchedulerDependentManager):
     @manager.periodic_task
     def _reclaim_queued_deletes(self, context):
         """Reclaim instances that are queued for deletion."""
-        interval = FLAGS.reclaim_instance_interval
+        interval = CONF.reclaim_instance_interval
         if interval <= 0:
-            LOG.debug(_("FLAGS.reclaim_instance_interval <= 0, skipping..."))
+            LOG.debug(_("CONF.reclaim_instance_interval <= 0, skipping..."))
             return

         instances = self.db.instance_get_all_by_host(context, self.host)
@@ -3044,7 +3045,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         self._resource_tracker_dict = new_resource_tracker_dict

     @manager.periodic_task(
-        ticks_between_runs=FLAGS.running_deleted_instance_poll_interval)
+        ticks_between_runs=CONF.running_deleted_instance_poll_interval)
     def _cleanup_running_deleted_instances(self, context):
         """Cleanup any instances which are erroneously still running after
         having been deleted.
@@ -3065,7 +3066,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         should do in production), or automatically reaping the instances (more
         appropriate for dev environments).
         """
-        action = FLAGS.running_deleted_instance_action
+        action = CONF.running_deleted_instance_action

         if action == "noop":
             return
@@ -3093,7 +3094,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                         self._cleanup_volumes(context, instance['uuid'], bdms)
                 else:
                     raise Exception(_("Unrecognized value '%(action)s'"
-                                      " for FLAGS.running_deleted_"
+                                      " for CONF.running_deleted_"
                                       "instance_action"), locals(),
                                     instance=instance)
@@ -3103,7 +3104,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         should be pushed down to the virt layer for efficiency.
         """
         def deleted_instance(instance):
-            timeout = FLAGS.running_deleted_instance_timeout
+            timeout = CONF.running_deleted_instance_timeout
             present = instance.name in present_name_labels
             erroneously_running = instance.deleted and present
             old_enough = (not instance.deleted_at or
@@ -3163,13 +3164,13 @@ class ComputeManager(manager.SchedulerDependentManager):
                 isinstance(e, exception.AggregateError))

     @manager.periodic_task(
-        ticks_between_runs=FLAGS.image_cache_manager_interval)
+        ticks_between_runs=CONF.image_cache_manager_interval)
     def _run_image_cache_manager_pass(self, context):
         """Run a single pass of the image cache manager."""
         if not self.driver.capabilities["has_imagecache"]:
             return
-        if FLAGS.image_cache_manager_interval == 0:
+        if CONF.image_cache_manager_interval == 0:
             return
         all_instances = self.db.instance_get_all(context)

nova/compute/resource_tracker.py

@@ -21,6 +21,7 @@ model.
 from nova.compute import claims
 from nova.compute import vm_states
+from nova import config
 from nova import context
 from nova import db
 from nova import exception
@@ -42,8 +43,8 @@ resource_tracker_opts = [
                help='Class that will manage stats for the local compute host')
     ]

-FLAGS = flags.FLAGS
-FLAGS.register_opts(resource_tracker_opts)
+CONF = config.CONF
+CONF.register_opts(resource_tracker_opts)

 LOG = logging.getLogger(__name__)
 COMPUTE_RESOURCE_SEMAPHORE = claims.COMPUTE_RESOURCE_SEMAPHORE
@@ -59,7 +60,7 @@ class ResourceTracker(object):
         self.driver = driver
         self.nodename = nodename
         self.compute_node = None
-        self.stats = importutils.import_object(FLAGS.compute_stats_class)
+        self.stats = importutils.import_object(CONF.compute_stats_class)
         self.tracked_instances = {}

     @lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
@@ -307,8 +308,8 @@ class ResourceTracker(object):
         self.stats.clear()

         # set some intiial values, reserve room for host/hypervisor:
-        resources['local_gb_used'] = FLAGS.reserved_host_disk_mb / 1024
-        resources['memory_mb_used'] = FLAGS.reserved_host_memory_mb
+        resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
+        resources['memory_mb_used'] = CONF.reserved_host_memory_mb
         resources['vcpus_used'] = 0
         resources['free_ram_mb'] = (resources['memory_mb'] -
                                     resources['memory_mb_used'])

nova/compute/rpcapi.py

@@ -18,14 +18,14 @@
 Client side of the compute RPC API.
 """

+from nova import config
 from nova import exception
 from nova import flags
 from nova.openstack.common import jsonutils
 from nova.openstack.common import rpc
 import nova.openstack.common.rpc.proxy

-FLAGS = flags.FLAGS
+CONF = config.CONF


 def _compute_topic(topic, ctxt, host, instance):
def _compute_topic(topic, ctxt, host, instance): def _compute_topic(topic, ctxt, host, instance):
@@ -158,7 +158,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     def __init__(self):
         super(ComputeAPI, self).__init__(
-                topic=FLAGS.compute_topic,
+                topic=CONF.compute_topic,
                 default_version=self.BASE_RPC_API_VERSION)

     def add_aggregate_host(self, ctxt, aggregate, host_param, host,
@@ -589,7 +589,7 @@ class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
     def __init__(self):
         super(SecurityGroupAPI, self).__init__(
-                topic=FLAGS.compute_topic,
+                topic=CONF.compute_topic,
                 default_version=self.BASE_RPC_API_VERSION)

     def refresh_security_group_rules(self, ctxt, security_group_id, host):

nova/compute/utils.py

@@ -22,6 +22,7 @@ import traceback
 from nova import block_device
 from nova.compute import instance_types
+from nova import config
 from nova import db
 from nova import exception
 from nova import flags
@@ -31,7 +32,7 @@ from nova.openstack.common import log
 from nova.openstack.common.notifier import api as notifier_api
 from nova import utils

-FLAGS = flags.FLAGS
+CONF = config.CONF
 LOG = log.getLogger(__name__)
@@ -86,7 +87,7 @@ def get_device_name_for_instance(context, instance, device):
     except (TypeError, AttributeError, ValueError):
         raise exception.InvalidDevicePath(path=mappings['root'])
     # NOTE(vish): remove this when xenapi is setting default_root_device
-    if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+    if CONF.compute_driver.endswith('xenapi.XenAPIDriver'):
         prefix = '/dev/xvd'
     if req_prefix != prefix:
         LOG.debug(_("Using %(prefix)s instead of %(req_prefix)s") % locals())
@@ -101,7 +102,7 @@ def get_device_name_for_instance(context, instance, device):
     # NOTE(vish): remove this when xenapi is properly setting
     #             default_ephemeral_device and default_swap_device
-    if FLAGS.compute_driver.endswith('xenapi.XenAPIDriver'):
+    if CONF.compute_driver.endswith('xenapi.XenAPIDriver'):
         instance_type_id = instance['instance_type_id']
         instance_type = instance_types.get_instance_type(instance_type_id)
         if instance_type['ephemeral_gb']:
@@ -184,11 +185,11 @@ def notify_about_instance_usage(context, instance, event_suffix,
     :param extra_usage_info: Dictionary containing extra values to add or
                              override in the notification.
     :param host: Compute host for the instance, if specified. Default is
-                 FLAGS.host
+                 CONF.host
     """
     if not host:
-        host = FLAGS.host
+        host = CONF.host

     if not extra_usage_info:
         extra_usage_info = {}