Merge "Always close all passed in fds beyond sensible_fd_limit on launch"

This commit is contained in:
Zuul 2019-05-08 17:44:23 +00:00 committed by Gerrit Code Review
commit 1ddc8747f4
3 changed files with 20 additions and 5 deletions

View File

@ -114,12 +114,14 @@ syslog_log_level
``syslog_log_level=ERROR``
rlimit_nofile
Which rlimit for number of open file descriptors should be set for rootwrap
and its children processes by default. This is useful in case there is a
Specify rlimit for number of open file descriptors used by oslo rootwrap
and its child processes by default. This is useful in case there is an
excessively large ulimit configured for the calling process that shouldn't
be inherited by oslo.rootwrap and its called processes. Will not attempt to raise
the limit. Defaults to 1024.
Ignored on platforms that do not provide "/proc/self/fd" (e.g. non-Linux).
.filters files
==============

View File

@ -29,5 +29,5 @@ syslog_log_level=ERROR
# Rootwrap daemon exits after this seconds of inactivity
daemon_timeout=600
# Rootwrap daemon limits itself to that many file descriptors
# Rootwrap daemon limits itself to that many file descriptors (Linux only)
rlimit_nofile=1024

View File

@ -33,6 +33,7 @@
from __future__ import print_function
import logging
import os
import sys
from oslo_rootwrap import subprocess
@ -91,12 +92,24 @@ def main(run_daemon=False):
RC_BADCONFIG, log=False)
if resource:
# When use close_fds=True on Python 2.x, we spend significant time
# in closing fds up to current soft ulimit, which could be large.
# On Python 2.x, calling subprocess with
# close_fds=True (which we do by default) can be inefficient when
# the current fd ulimits are large, because it blindly closes
# all fds in the range(1, $verylargenumber)
# Lower our ulimit to a reasonable value to regain performance.
fd_limits = resource.getrlimit(resource.RLIMIT_NOFILE)
sensible_fd_limit = min(config.rlimit_nofile, fd_limits[0])
if (fd_limits[0] > sensible_fd_limit):
# Close any fd beyond sensible_fd_limit prior to adjusting our
# rlimit to ensure all fds are closed
for fd_entry in os.listdir('/proc/self/fd'):
# NOTE(dmllr): In a previous patch revision, non-numeric
# dir entries were silently ignored, which reviewers
# didn't like. Re-add exception handling if it occurs.
fd = int(fd_entry)
if fd >= sensible_fd_limit:
os.close(fd)
# Unfortunately this inherits to our children, so allow them to
# re-raise by passing through the hard limit unmodified
resource.setrlimit(