137e359222
This bug only manifested after a heat request was made to the engine. Once a request was made, a Qpid connection handler continued to wait in a select call, presumably for a subsequent request. Closing the connection immediately after detecting a keyboard interrupt and then terminating the thread yields a clean shutdown with no traceback.

Closes #176
Change-Id: Ifdb82d33a6c52a004bbd6b08c564264f4140e800
Signed-off-by: Jeff Peeler <jpeeler@redhat.com>
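For context, a minimal sketch of how a worker binary is expected to drive this module. The heat.service import path, the heat.engine.manager.EngineManager class path, and the config-parsing call are assumptions for illustration only; they are not part of this change.

import sys

from heat.openstack.common import cfg
from heat import service  # assumed import path for the module below

if __name__ == '__main__':
    cfg.CONF(sys.argv[1:], project='heat')  # assumed CLI/config parsing
    srv = service.Service.create(
        binary='heat-engine', topic='engine',
        manager='heat.engine.manager.EngineManager')
    service.serve(srv)
    # wait() blocks until shutdown; on Ctrl-C it now cleans up the RPC
    # connection before stopping the launcher, avoiding the Qpid traceback.
    service.wait()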
242 lines
7.4 KiB
Python
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Generic Node base class for all workers that run on hosts."""

import inspect
import os

import eventlet
import greenlet

from heat.openstack.common import rpc
from heat.openstack.common import cfg
from heat.openstack.common import importutils
from heat.openstack.common import log as logging

from heat.common import utils as heat_utils
from heat.common import exception
from heat.common import context

from heat import version

LOG = logging.getLogger(__name__)


class Launcher(object):
    """
    Launch one or more services and wait for them to complete.
    """

    def __init__(self):
        """Initialize the service launcher.

        :returns: None

        """
        self._services = []

    @staticmethod
    def run_server(server):
"""Start and wait for a server to finish.
|
|
|
|
:param service: Server to run and wait for.
|
|
:returns: None
|
|
|
|
"""
        server.start()
        server.wait()

    def launch_server(self, server):
        """Load and start the given server.

        :param server: The server you would like to start.
        :returns: None

        """
        gt = eventlet.spawn(self.run_server, server)
        self._services.append(gt)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        for service in self._services:
            service.kill()

    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None

        """
        for service in self._services:
            try:
                service.wait()
            except greenlet.GreenletExit:
                pass


class Service(object):
"""Service object for binaries running on hosts.
|
|
|
|
A service takes a manager and enables rpc by listening to queues based
|
|
on topic. It also periodically runs tasks on the manager and reports
|
|
it state to the database services table."""

    def __init__(self, host, binary, topic, manager,
                 periodic_interval=None, *args, **kwargs):
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *args, **kwargs)
        self.periodic_interval = periodic_interval
        super(Service, self).__init__(*args, **kwargs)
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

    def start(self):
        vcs_string = version.version_string_with_vcs()
        LOG.info(_('Starting %(topic)s node (version %(vcs_string)s)'),
                 {'topic': self.topic, 'vcs_string': vcs_string})
        # TODO do we need this ? -> utils.cleanup_file_locks()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        # self._create_service_ref(ctxt)

        self.conn = rpc.create_connection(new=True)
        LOG.debug(_("Creating Consumer connection for Service %s") %
                  self.topic)

        rpc_dispatcher = self.manager.create_rpc_dispatcher()

        # Share this same connection for these Consumers
        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=False)

        node_topic = '%s.%s' % (self.topic, self.host)
        self.conn.create_consumer(node_topic, rpc_dispatcher, fanout=False)

        self.conn.create_consumer(self.topic, rpc_dispatcher, fanout=True)

        # Consume from all consumers in a thread
        self.conn.consume_in_thread()

        if self.periodic_interval:
            periodic = heat_utils.LoopingCall(self.periodic_tasks)
            periodic.start(interval=self.periodic_interval, now=False)
            self.timers.append(periodic)

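    # Attribute lookups that miss on the Service fall through to its manager.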
    def __getattr__(self, key):
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)

    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               periodic_interval=None, config=None):
        """Instantiates class and passes back application object.

        :param host: defaults to cfg.CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'heat-' part
        :param manager: defaults to cfg.CONF.<topic>_manager
        :param periodic_interval: defaults to cfg.CONF.periodic_interval

        """
        if not host:
            host = cfg.CONF.host
        if not binary:
            binary = os.path.basename(inspect.stack()[-1][1])
        if not topic:
            topic = binary.rpartition('heat-')[2]
        if not manager:
            manager = cfg.CONF.get('%s_manager' % topic, None)
        if not periodic_interval:
            periodic_interval = cfg.CONF.periodic_interval
        service_obj = cls(host, binary, topic, manager,
                          periodic_interval)

        return service_obj

    def kill(self):
        self.stop()

    def stop(self):
        # Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them.. as we're shutting down anyway
        try:
            self.conn.close()
        except Exception:
            pass
        for x in self.timers:
            try:
                x.stop()
            except Exception:
                pass
        self.timers = []

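    # Wait for any periodic task timers started in start() to finish.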
    def wait(self):
        for x in self.timers:
            try:
                x.wait()
            except Exception:
                pass

    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)


# NOTE(vish): the global launcher is to maintain the existing
#             functionality of calling service.serve +
#             service.wait
_launcher = None


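# Module-level helpers: serve() hands each server to the single global
# Launcher, and wait() blocks until the services have stopped.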
def serve(*servers):
    global _launcher
    if not _launcher:
        _launcher = Launcher()
    for server in servers:
        _launcher.launch_server(server)


def wait():
    LOG.debug(_('Full set of CONF:'))
    for flag in cfg.CONF:
        flag_get = cfg.CONF.get(flag, None)
        # hide flag contents from log if contains a password
        # should use secret flag when switch over to openstack-common
        if ("_password" in flag or "_key" in flag or
                (flag == "sql_connection" and "mysql:" in flag_get)):
            LOG.debug(_('%(flag)s : FLAG SET ') % locals())
        else:
            LOG.debug('%(flag)s : %(flag_get)s' % locals())
    try:
        _launcher.wait()
        rpc.cleanup()
    except KeyboardInterrupt:
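        # Per the commit message above: clean up the RPC (Qpid) connection as
        # soon as the interrupt is seen, then stop the service threads, so
        # shutdown is clean with no traceback.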
        rpc.cleanup()
        _launcher.stop()