Fix nova configuration loading
It seems some option handling within nova has changed, so we need to update our use of the configuration object. These changes ensure that the options we care about are registered before they are used.

Change-Id: I0dd34f620eed5f43af6da5b02f9c41d35287c275
Signed-off-by: Doug Hellmann <doug.hellmann@dreamhost.com>
This commit is contained in:
parent
344ad3eef0
commit
2fe0a3c3ef
@ -24,9 +24,16 @@ import sys
|
||||
from ceilometer.service import prepare_service
|
||||
from ceilometer.openstack.common import cfg
|
||||
from nova import service
|
||||
from nova.compute import manager as compute_manager
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
# Register the compute options from nova with our config object so
|
||||
# our pollsters can figure out which compute driver the hypervisor
|
||||
# is using.
|
||||
cfg.CONF.register_opts(compute_manager.compute_opts)
|
||||
|
||||
prepare_service(sys.argv)
|
||||
server = \
|
||||
service.Service.create(binary='ceilometer-agent',
|
||||
|
@ -30,8 +30,8 @@ from ceilometer.openstack.common import log
|
||||
from ceilometer.openstack.common import timeutils
|
||||
from ceilometer.openstack.common.rpc import dispatcher as rpc_dispatcher
|
||||
|
||||
# FIXME(dhellmann): There must be another way to do this.
|
||||
# Import rabbit_notifier to register notification_topics flag
|
||||
# FIXME(dhellmann): There must be another way to do this. Import
|
||||
# rabbit_notifier to register notification_topics flag
|
||||
import ceilometer.openstack.common.notifier.rabbit_notifier
|
||||
try:
|
||||
import nova.openstack.common.rpc as nova_rpc
|
||||
@ -66,7 +66,7 @@ class CollectorManager(manager.Manager):
|
||||
# invocation protocol (they do not include a "method"
|
||||
# parameter).
|
||||
self.connection.declare_topic_consumer(
|
||||
topic='%s.info' % flags.FLAGS.notification_topics[0],
|
||||
topic='%s.info' % cfg.CONF.notification_topics[0],
|
||||
callback=self.compute_handler.notify)
|
||||
|
||||
# Set ourselves up as a separate worker for the metering data,
|
||||
|
@ -19,11 +19,11 @@
|
||||
from lxml import etree
|
||||
|
||||
from nova import flags
|
||||
import nova.virt.connection
|
||||
|
||||
from ceilometer import counter
|
||||
from ceilometer import plugin
|
||||
from ceilometer.compute import instance as compute_instance
|
||||
from ceilometer.openstack.common import importutils
|
||||
from ceilometer.openstack.common import log
|
||||
from ceilometer.openstack.common import timeutils
|
||||
|
||||
@ -32,6 +32,19 @@ FLAGS = flags.FLAGS
|
||||
MIB = 2 ** 20 # mebibytes
|
||||
|
||||
|
||||
def get_libvirt_connection():
    """Open and return a connection for talking to libvirt.

    First tries the Folsom-era mechanism, where the compute driver is
    resolved by name under the ``nova.virt`` namespace; if that helper
    is not available (an older nova), falls back to the Essex-style
    ``nova.virt.connection`` factory.
    """
    try:
        # Folsom: resolve the configured compute driver dynamically.
        # NOTE(review): only works post-Essex because the configuration
        # setting changed — hence the fallback below.
        return importutils.import_object_ns('nova.virt',
                                            FLAGS.compute_driver)
    except ImportError:
        # Essex fallback: the old connection factory still exists there.
        import nova.virt.connection
        return nova.virt.connection.get_connection(read_only=True)
|
||||
|
||||
|
||||
def make_counter_from_instance(instance, name, type, volume):
|
||||
return counter.Counter(
|
||||
source='?',
|
||||
@ -72,7 +85,7 @@ class DiskIOPollster(plugin.PollsterBase):
|
||||
|
||||
def get_counters(self, manager, context):
|
||||
if FLAGS.compute_driver == 'libvirt.LibvirtDriver':
|
||||
conn = nova.virt.connection.get_connection(read_only=True)
|
||||
conn = get_libvirt_connection()
|
||||
for instance in manager.db.instance_get_all_by_host(context,
|
||||
manager.host):
|
||||
# TODO(jd) This does not work see bug#998089
|
||||
@ -103,7 +116,7 @@ class CPUPollster(plugin.PollsterBase):
|
||||
LOG = log.getLogger(__name__ + '.cpu')
|
||||
|
||||
def get_counters(self, manager, context):
|
||||
conn = nova.virt.connection.get_connection(read_only=True)
|
||||
conn = get_libvirt_connection()
|
||||
# FIXME(dhellmann): How do we get a list of instances without
|
||||
# talking directly to the database?
|
||||
for instance in manager.db.instance_get_all_by_host(context,
|
||||
|
Loading…
Reference in New Issue
Block a user