Merge "cleanup openstack-common.conf and sync updated files"

commit b3d30b95b2
Author: Jenkins, 2015-06-10 20:23:10 +00:00 (committed by Gerrit Code Review)
6 changed files with 39 additions and 35 deletions

magnum/openstack/common/eventlet_backdoor.py

@@ -143,7 +143,7 @@ def initialize_if_enabled():
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d'),
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
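The replacement above defers string interpolation to the logging call itself: the mapping is passed as an argument instead of being applied with %, so the message is only formatted when the record is actually emitted. A minimal standalone sketch of the two styles, without the _LI() translation marker (the port value is a placeholder):

import logging
import os

LOG = logging.getLogger(__name__)

# Eager: the string is built even if INFO records are being dropped.
LOG.info('Eventlet backdoor listening on %(port)s for process %(pid)d'
         % {'port': 4444, 'pid': os.getpid()})

# Lazy: logging interpolates only when the record is emitted, and handlers
# still see the unformatted template plus the argument mapping.
LOG.info('Eventlet backdoor listening on %(port)s for process %(pid)d',
         {'port': 4444, 'pid': os.getpid()})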

magnum/openstack/common/loopingcall.py

@@ -84,9 +84,9 @@ class FixedIntervalLoopingCall(LoopingCallBase):
break
delay = end - start - interval
if delay > 0:
LOG.warn(_LW('task %(func_name)r run outlasted '
'interval by %(delay).2f sec'),
{'func_name': self.f, 'delay': delay})
LOG.warning(_LW('task %(func_name)r run outlasted '
'interval by %(delay).2f sec'),
{'func_name': self.f, 'delay': delay})
greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
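LOG.warn here is just the deprecated alias for LOG.warning in the standard logging module (it has since been removed from current Python 3 releases), so the spelling change is free. A quick illustration:

import logging

LOG = logging.getLogger(__name__)
LOG.warning('task run outlasted interval')  # documented spelling
LOG.warn('task run outlasted interval')     # deprecated alias; removed in newer Python 3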

magnum/openstack/common/periodic_task.py

@@ -222,11 +222,11 @@ class PeriodicTasks(object):
try:
task(self, context)
except Exception as e:
except Exception:
if raise_on_error:
raise
LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
{"full_task_name": full_task_name, "e": e})
LOG.exception(_LE("Error during %(full_task_name)s"),
{"full_task_name": full_task_name})
time.sleep(0)
return idle_for
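LOG.exception() logs at ERROR level and appends the active traceback, which already carries the exception type and message, so catching the exception just to interpolate it into the message is redundant. A standalone sketch of the pattern, using a hypothetical run_task() wrapper rather than the PeriodicTasks code itself:

import logging

LOG = logging.getLogger(__name__)


def run_task(task, context, raise_on_error=False):
    # Hypothetical wrapper; only the logging pattern matters here.
    try:
        task(context)
    except Exception:
        if raise_on_error:
            raise
        # exception() must be called from an except block; it attaches the
        # current traceback (and thus the exception text) automatically.
        LOG.exception('Error during %s', getattr(task, '__name__', task))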

magnum/openstack/common/service.py

@@ -18,6 +18,7 @@
"""Generic Node base class for all workers that run on hosts."""
import errno
import io
import logging
import os
import random
@@ -25,14 +26,6 @@ import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo_config import cfg
@@ -59,15 +52,15 @@ def _is_daemon():
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except io.UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
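With the Python 2.6 fallback gone, the handler can rely on io.UnsupportedOperation directly; that is what file-like objects without a real descriptor raise from fileno(). A quick standalone check, assuming stdout has been replaced by an in-memory stream:

import io

stream = io.StringIO()
try:
    stream.fileno()                   # in-memory streams have no OS-level descriptor
except io.UnsupportedOperation:
    print('no fileno(); treat the process as daemonized')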
@@ -206,12 +199,16 @@ class ProcessLauncher(object):
for handler in cls._signal_handlers_set:
handler(*args, **kwargs)
def __init__(self):
"""Constructor."""
def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {}
self.sigcaught = None
self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
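The new keyword argument lets callers trade wake-up latency against idle CPU in the child-polling loop further down. A hypothetical caller, assuming the ProcessLauncher/Service API of this module is importable as magnum.openstack.common.service:

from magnum.openstack.common import service


class Worker(service.Service):
    """Minimal Service subclass for illustration only."""


launcher = service.ProcessLauncher(wait_interval=0.05)   # poll for dead children every 50 ms
launcher.launch_service(Worker(), workers=4)
launcher.wait()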
@@ -230,7 +227,7 @@ class ProcessLauncher(object):
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
self.readpipe.read(1)
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
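The watcher works because only the parent holds the write end of the pipe open: when the parent dies, the kernel closes that end and the blocked read returns EOF, so reading a single byte is enough to notice. A standalone sketch of the same idea with a plain fork and os.pipe (not the GreenPipe-based ProcessLauncher code):

import os

r, w = os.pipe()
if os.fork() == 0:
    os.close(w)         # child keeps only the read end
    os.read(r, 1)       # blocks; returns b'' once the parent's write end closes
    print('parent died unexpectedly, exiting')
    os._exit(1)
os.close(r)             # parent keeps only the write end open
# When the parent exits, even abruptly, the kernel closes its write end and
# the child's read() returns, letting the child shut itself down.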
@@ -238,15 +235,12 @@ class ProcessLauncher(object):
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
# Parent signals with SIGTERM when it wants us to go away.
signal.signal(signal.SIGTERM, signal.SIG_DFL)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
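With the custom _sigterm handler removed, a child just restores the default SIGTERM disposition and lets the parent's SIGTERM terminate it, keeping a handler only for SIGHUP where the platform has it. A compressed standalone sketch; SignalExit is simplified, the hasattr() check stands in for _sighup_supported(), and the final SIGINT line follows the comment that closes the hunk above:

import signal


class SignalExit(SystemExit):
    def __init__(self, signo):
        super(SignalExit, self).__init__()
        self.signo = signo


def _sighup(*args):
    signal.signal(signal.SIGHUP, signal.SIG_DFL)
    raise SignalExit(signal.SIGHUP)


signal.signal(signal.SIGTERM, signal.SIG_DFL)    # the parent's SIGTERM terminates the child
if hasattr(signal, 'SIGHUP'):                    # stand-in for _sighup_supported()
    signal.signal(signal.SIGHUP, _sighup)
signal.signal(signal.SIGINT, signal.SIG_IGN)     # ignore SIGINT; the parent coordinates shutdown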
@@ -337,8 +331,8 @@ class ProcessLauncher(object):
def _wait_child(self):
try:
# Block while any of child processes have exited
pid, status = os.waitpid(0, 0)
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
@@ -367,6 +361,10 @@
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
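Together with the WNOHANG change above, this turns the reaper into a polling loop: waitpid() returns (0, 0) right away when children exist but none has exited, and the short sleep keeps the loop from monopolizing a CPU. A standalone sketch of the same loop shape, outside eventlet:

import os
import time


def reap_children(poll_interval=0.01):
    """Hypothetical reaper mirroring the ProcessLauncher polling pattern."""
    while True:
        try:
            pid, status = os.waitpid(0, os.WNOHANG)   # never blocks
        except OSError:                               # ECHILD: no children left to wait for
            return
        if pid == 0:
            time.sleep(poll_interval)                 # nothing has exited yet; yield the CPU
            continue
        print('child %d exited with status %d' % (pid, os.WEXITSTATUS(status)))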
@@ -391,8 +389,14 @@
if not _is_sighup_and_daemon(self.sigcaught):
break
cfg.CONF.reload_config_files()
for service in set(
[wrap.service for wrap in self.children.values()]):
service.reset()
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
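On SIGHUP the parent now reloads its configuration, calls reset() once per distinct service object rather than once per child, and then forwards the signal to every child pid. A hypothetical subclass sketching where reset() fits, assuming the Service base class defined in this module:

from magnum.openstack.common import service


class RuleService(service.Service):
    """Hypothetical service whose cached state must track config reloads."""

    def __init__(self):
        super(RuleService, self).__init__()
        self.cached = self._rebuild_from_conf()

    def reset(self):
        # Runs in the parent on SIGHUP, after cfg.CONF.reload_config_files(),
        # so anything derived from configuration can be recomputed here.
        self.cached = self._rebuild_from_conf()

    def _rebuild_from_conf(self):
        # Hypothetical helper: recompute whatever this service derives from CONF.
        return {}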

magnum/openstack/common/threadgroup.py

@@ -17,6 +17,7 @@ import threading
import eventlet
from eventlet import greenpool
from magnum.openstack.common._i18n import _LE
from magnum.openstack.common import loopingcall
@@ -98,15 +99,15 @@ class ThreadGroup(object):
x.stop()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
except Exception:
LOG.exception(_LE('Error stopping thread.'))
def stop_timers(self):
for x in self.timers:
try:
x.stop()
except Exception as ex:
LOG.exception(ex)
except Exception:
LOG.exception(_LE('Error stopping timer.'))
self.timers = []
def stop(self, graceful=False):
@@ -132,8 +133,8 @@ class ThreadGroup(object):
x.wait()
except eventlet.greenlet.GreenletExit:
pass
except Exception as ex:
LOG.exception(ex)
except Exception:
LOG.exception(_LE('Error waiting on ThreadGroup.'))
current = threading.current_thread()
# Iterate over a copy of self.threads so thread_done doesn't

openstack-common.conf

@@ -7,7 +7,6 @@ module=loopingcall
module=periodic_task
module=service
module=systemd
module=versionutils
module=threadgroup
# The base module to hold the copy of openstack.common