Do not change process UID

When run with sudo, Anvil changed the process's real UID to SUDO_UID,
which easily led to files being created with different owners. This
mechanism can confuse some tools (like pip). Now we can almost safely
stop changing the real UID: prepare runs as non-root, while bootstrap,
install, start, status, and stop run as root.

This also removes the dependency on sudo.
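
For illustration, a minimal before/after sketch of a typical call site
(argument names are taken from the diff below; exact calls vary):

    # before: argument splatting plus per-call privilege escalation
    (stdout, stderr) = sh.execute(*cmd, run_as_root=True, check_exit_code=False)
    # after: the command list is passed as-is and runs with the current UID
    (stdout, stderr) = sh.execute(cmd, check_exit_code=False)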

Implements: blueprint no-uid-tricks
Fixes: bug #1179747
Fixes: bug #1186440
Fixes: bug #1186448

Change-Id: I7ae293aad7f0a5ba08962e6b6b709fe49b8b81ec
Author: Alessio Ababilov, 2013-05-31 11:09:37 +04:00 (committed by Joshua Harlow)
Commit: 0ba7f0e03b (parent: 578a49860a)
27 changed files with 252 additions and 349 deletions

View File

@ -148,8 +148,7 @@ def ensure_anvil_dirs(root_dir):
if sh.isdir(d):
continue
LOG.info("Creating anvil directory at path: %s", d)
with sh.Rooted(True):
sh.mkdir(d, adjust_suids=True)
sh.mkdir(d)
def store_current_settings(c_settings):
@ -159,14 +158,11 @@ def store_current_settings(c_settings):
for k in ['action', 'verbose', 'dryrun']:
if k in c_settings:
to_save.pop(k, None)
with sh.Rooted(True):
with open("/etc/anvil/settings.yaml", 'w') as fh:
fh.write("# Anvil last used settings\n")
fh.write(utils.add_header("/etc/anvil/settings.yaml",
utils.prettify_yaml(to_save)))
fh.flush()
(uid, gid) = sh.get_suids()
sh.chown("/etc/anvil/settings.yaml", uid, gid)
with open("/etc/anvil/settings.yaml", 'w') as fh:
fh.write("# Anvil last used settings\n")
fh.write(utils.add_header("/etc/anvil/settings.yaml",
utils.prettify_yaml(to_save)))
fh.flush()
except Exception as e:
LOG.debug("Failed writing to %s due to %s", "/etc/anvil/settings.yaml", e)
@ -175,8 +171,6 @@ def ensure_perms():
# Ensure we are running as root to start...
if not sh.got_root():
raise excp.PermException("Root access required")
# Drop to usermode (which also ensures we can do this...)
sh.user_mode(quiet=False)
def main():

View File

@ -177,7 +177,6 @@ class PkgUninstallComponent(base.Component):
super(PkgUninstallComponent, self).__init__(*args, **kargs)
trace_fn = tr.trace_filename(self.get_option('trace_dir'), 'created')
self.tracereader = tr.TraceReader(trace_fn)
self.purge_packages = kargs.get('purge_packages')
def unconfigure(self):
self._unconfigure_links()
@ -188,7 +187,7 @@ class PkgUninstallComponent(base.Component):
utils.log_iterable(sym_files, logger=LOG,
header="Removing %s symlink files" % (len(sym_files)))
for fn in sym_files:
sh.unlink(fn, run_as_root=True)
sh.unlink(fn)
def post_uninstall(self):
self._uninstall_files()
@ -203,7 +202,7 @@ class PkgUninstallComponent(base.Component):
utils.log_iterable(files_touched, logger=LOG,
header="Removing %s miscellaneous files" % (len(files_touched)))
for fn in files_touched:
sh.unlink(fn, run_as_root=True)
sh.unlink(fn)
def _uninstall_dirs(self):
dirs_made = self.tracereader.dirs_made()
@ -212,4 +211,4 @@ class PkgUninstallComponent(base.Component):
utils.log_iterable(dirs_alive, logger=LOG,
header="Removing %s created directories" % (len(dirs_alive)))
for dir_name in dirs_alive:
sh.deldir(dir_name, run_as_root=True)
sh.deldir(dir_name)

View File

@ -136,7 +136,7 @@ class PythonTestingComponent(base.Component):
if self.get_bool_option("verbose", default_value=False):
null_fh = None
try:
sh.execute(*cmd, stdout_fh=None, stderr_fh=null_fh, cwd=app_dir, env_overrides=env)
sh.execute(cmd, stdout_fh=None, stderr_fh=null_fh, cwd=app_dir, env_overrides=env)
except excp.ProcessExecutionError as e:
if self.get_bool_option("ignore-test-failures", default_value=False):
LOG.warn("Ignoring test failure of component %s: %s", colorizer.quote(self.name), e)

View File

@ -45,7 +45,7 @@ class CinderInstaller(binstall.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing cinder to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD, 'run_as_root': True}]
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
def config_params(self, config_fn):

View File

@ -72,7 +72,7 @@ class GlanceConfigurator(base.Configurator):
LOG.debug("Ensuring file system store directory %r exists and is empty." % (img_store_dir))
if sh.isdir(img_store_dir):
sh.deldir(img_store_dir)
sh.mkdirslist(img_store_dir, tracewriter=self.installer.tracewriter, adjust_suids=True)
sh.mkdirslist(img_store_dir, tracewriter=self.installer.tracewriter)
def _config_adjust_reg(self, config):
self._config_adjust_api_reg(config)

View File

@ -154,7 +154,7 @@ class DBRuntime(bruntime.ProgramRuntime):
cmd = self._get_command(action)
if not cmd:
raise NotImplementedError("No distro command provided to perform action %r" % (action))
return sh.execute(*cmd, run_as_root=True, check_exit_code=check_exit_code)
return sh.execute(cmd, check_exit_code=check_exit_code)
def start(self):
if self.statii()[0].status != bruntime.STATUS_STARTED:

View File

@ -52,7 +52,7 @@ class GlanceInstaller(binstall.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing glance to database: %s", colorizer.quote(self.configurator.DB_NAME))
cmds = [{'cmd': SYNC_DB_CMD, 'run_as_root': True}]
cmds = [{'cmd': SYNC_DB_CMD}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
@property

View File

@ -46,7 +46,6 @@ def drop_db(distro, dbtype, user, pw, dbname, **kwargs):
cmds = list()
cmds.append({
'cmd': dropcmd,
'run_as_root': False,
})
utils.execute_template(*cmds, params=params)
else:
@ -67,7 +66,6 @@ def create_db(distro, dbtype, user, pw, dbname, **kwargs):
cmds = list()
cmds.append({
'cmd': createcmd,
'run_as_root': False,
})
utils.execute_template(*cmds, params=params)
else:

View File

@ -93,9 +93,8 @@ class NetworkCleaner(object):
continue
cwd = ''
cmdline = ''
with sh.Rooted(True):
cwd = proc.getcwd()
cmdline = " ".join(proc.cmdline)
cwd = proc.getcwd()
cmdline = " ".join(proc.cmdline)
to_try = False
for t in [cwd, cmdline]:
if t.lower().find("nova") != -1:
@ -107,8 +106,7 @@ class NetworkCleaner(object):
header="Killing leftover nova dnsmasq processes with process ids",
logger=nconf.LOG)
for pid in to_kill:
with sh.Rooted(True):
sh.kill(pid)
sh.kill(pid)
def _clean_iptables(self):
# Nova doesn't seem to cleanup its iptables rules that it
@ -136,7 +134,7 @@ class NetworkCleaner(object):
# Isolate the nova rules
clean_rules = []
list_cmd = ['iptables', '--list-rules', '--verbose']
(stdout, _stderr) = sh.execute(*list_cmd, run_as_root=True)
(stdout, _stderr) = sh.execute(list_cmd)
for line in stdout.splitlines():
line = line.strip()
if not line_matcher(line, "-A"):
@ -149,7 +147,7 @@ class NetworkCleaner(object):
# Isolate the nova nat rules
clean_nats = []
nat_cmd = ['iptables', '--list-rules', '--verbose', '--table', 'nat']
(stdout, _stderr) = sh.execute(*nat_cmd, run_as_root=True)
(stdout, _stderr) = sh.execute(nat_cmd)
for line in stdout.splitlines():
line = line.strip()
if not line_matcher(line, "-A"):
@ -162,7 +160,7 @@ class NetworkCleaner(object):
# Isolate the nova chains
clean_chains = []
chain_cmd = ['iptables', '--list-rules', '--verbose']
(stdout, _stderr) = sh.execute(*chain_cmd, run_as_root=True)
(stdout, _stderr) = sh.execute(chain_cmd)
for line in stdout.splitlines():
if not line_matcher(line, "-N"):
continue
@ -174,7 +172,7 @@ class NetworkCleaner(object):
# Isolate the nova nat chains
clean_nat_chains = []
nat_chain_cmd = ['iptables', '--list-rules', '--verbose', '--table', 'nat']
(stdout, _stderr) = sh.execute(*nat_chain_cmd, run_as_root=True)
(stdout, _stderr) = sh.execute(nat_chain_cmd)
for line in stdout.splitlines():
if not line_matcher(line, "-N"):
continue
@ -187,11 +185,11 @@ class NetworkCleaner(object):
for r in clean_rules + clean_chains:
pieces = r.split(None)
pieces = ['iptables'] + pieces
sh.execute(*pieces, run_as_root=True, shell=True)
sh.execute(pieces, shell=True)
for r in clean_nats + clean_nat_chains:
pieces = r.split(None)
pieces = ['iptables', '--table', 'nat'] + pieces
sh.execute(*pieces, run_as_root=True, shell=True)
sh.execute(pieces, shell=True)
def clean(self):
self._stop_dnsmasq()

View File

@ -59,7 +59,7 @@ class Virsh(object):
def _service_status(self):
cmd = self.distro.get_command('libvirt', 'status')
(stdout, stderr) = sh.execute(*cmd, run_as_root=True, check_exit_code=False)
(stdout, stderr) = sh.execute(cmd, check_exit_code=False)
combined = (stdout + stderr)
if combined.lower().find("running") != -1 or combined.lower().find('start') != -1:
return (_ALIVE, combined)
@ -78,7 +78,7 @@ class Virsh(object):
def restart_service(self):
cmd = self.distro.get_command('libvirt', 'restart')
sh.execute(*cmd, run_as_root=True)
sh.execute(cmd)
def wait_active(self):
# TODO(harlowja) fix this by using the component wait active...
@ -99,7 +99,6 @@ class Virsh(object):
self.wait_active()
cmds = [{
'cmd': self.distro.get_command('libvirt', 'verify'),
'run_as_root': True,
}]
mp = {
'VIRT_PROTOCOL': virt_protocol,
@ -123,30 +122,29 @@ class Virsh(object):
if not virt_protocol:
LOG.warn("Could not clear out libvirt domains, no known protocol for virt type: %s", colorizer.quote(virt_type))
return
with sh.Rooted(True):
LOG.info("Attempting to clear out leftover libvirt domains using protocol: %s", colorizer.quote(virt_protocol))
LOG.info("Attempting to clear out leftover libvirt domains using protocol: %s", colorizer.quote(virt_protocol))
try:
self.restart_service()
self.wait_active()
except (excp.StartException, IOError) as e:
LOG.warn("Could not restart the libvirt daemon due to: %s", e)
return
try:
conn = libvirt.open(virt_protocol)
except libvirt.libvirtError as e:
LOG.warn("Could not connect to libvirt using protocol %s due to: %s", colorizer.quote(virt_protocol), e)
return
with contextlib.closing(conn) as ch:
try:
self.restart_service()
self.wait_active()
except (excp.StartException, IOError) as e:
LOG.warn("Could not restart the libvirt daemon due to: %s", e)
return
try:
conn = libvirt.open(virt_protocol)
except libvirt.libvirtError as e:
LOG.warn("Could not connect to libvirt using protocol %s due to: %s", colorizer.quote(virt_protocol), e)
return
with contextlib.closing(conn) as ch:
try:
defined_domains = ch.listDefinedDomains()
kill_domains = list()
for domain in defined_domains:
if domain.startswith(inst_prefix):
kill_domains.append(domain)
if kill_domains:
utils.log_iterable(kill_domains, logger=LOG,
header="Found %s old domains to destroy" % (len(kill_domains)))
for domain in sorted(kill_domains):
self._destroy_domain(libvirt, ch, domain)
except libvirt.libvirtError, e:
LOG.warn("Could not clear out libvirt domains due to: %s", e)
defined_domains = ch.listDefinedDomains()
kill_domains = list()
for domain in defined_domains:
if domain.startswith(inst_prefix):
kill_domains.append(domain)
if kill_domains:
utils.log_iterable(kill_domains, logger=LOG,
header="Found %s old domains to destroy" % (len(kill_domains)))
for domain in sorted(kill_domains):
self._destroy_domain(libvirt, ch, domain)
except libvirt.libvirtError, e:
LOG.warn("Could not clear out libvirt domains due to: %s", e)

View File

@ -82,11 +82,10 @@ class HorizonInstaller(binstall.PythonInstallComponent):
utils.log_iterable(log_fns, logger=LOG,
header="Adjusting %s log files" % (len(log_fns)))
for fn in log_fns:
with sh.Rooted(True):
if clear:
sh.unlink(fn, True)
sh.touch_file(fn, die_if_there=False, tracewriter=self.tracewriter)
sh.chmod(fn, 0666)
if clear:
sh.unlink(fn, True)
sh.touch_file(fn, die_if_there=False, tracewriter=self.tracewriter)
sh.chmod(fn, 0666)
return len(log_fns)
def _configure_files(self):
@ -137,7 +136,7 @@ class HorizonRuntime(bruntime.ProgramRuntime):
cmd = self.distro.get_command('apache', action)
if not cmd:
raise NotImplementedError("No distro command provided to perform action %r" % (action))
return sh.execute(*cmd, run_as_root=True, check_exit_code=check_exit_code)
return sh.execute(cmd, check_exit_code=check_exit_code)
def restart(self):
self._run_action('restart')

View File

@ -63,7 +63,7 @@ class KeystoneInstaller(binstall.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing keystone to database: %s", colorizer.quote(self.configurator.DB_NAME))
sync_cmd = MANAGE_CMD + ['db_sync']
cmds = [{'cmd': sync_cmd, 'run_as_root': True}]
cmds = [{'cmd': sync_cmd}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
@property
@ -86,9 +86,9 @@ class KeystoneInstaller(binstall.PythonInstallComponent):
LOG.info("Setting up keystone's pki support.")
for value in kconf.PKI_FILES.values():
sh.mkdirslist(sh.dirname(sh.joinpths(self.configurator.link_dir, value)),
tracewriter=self.tracewriter, adjust_suids=True)
tracewriter=self.tracewriter)
pki_cmd = MANAGE_CMD + ['pki_setup']
cmds = [{'cmd': pki_cmd, 'run_as_root': True}]
cmds = [{'cmd': pki_cmd}]
utils.execute_template(*cmds, cwd=self.bin_dir, params=self.config_params(None))
def warm_configs(self):

View File

@ -35,7 +35,7 @@ NET_INITED_FN = 'nova.network.inited.yaml'
# This makes the database be in sync with nova
DB_SYNC_CMD = [
{'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE', 'db', 'sync'], 'run_as_root': True},
{'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE', 'db', 'sync']},
]
# Used to create a fixed network when initializating nova
@ -43,7 +43,6 @@ FIXED_NET_CMDS = [
{
'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE',
'network', 'create', 'private', '$FIXED_RANGE', '1', '$FIXED_NETWORK_SIZE'],
'run_as_root': True,
},
]
@ -51,12 +50,10 @@ FIXED_NET_CMDS = [
FLOATING_NET_CMDS = [
{
'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE', 'floating', 'create', '$FLOATING_RANGE'],
'run_as_root': True,
},
{
'cmd': ['$BIN_DIR/nova-manage', '--config-file', '$CFG_FILE',
'floating', 'create', '--ip_range=$TEST_FLOATING_RANGE', '--pool=$TEST_FLOATING_POOL'],
'run_as_root': True,
},
]

View File

@ -43,7 +43,7 @@ class QuantumInstaller(binstall.PythonInstallComponent):
def _sync_db(self):
LOG.info("Syncing quantum to database: %s", colorizer.quote(self.configurator.DB_NAME))
#cmds = [{"cmd": SYNC_DB_CMD, "run_as_root": True}]
#cmds = [{"cmd": SYNC_DB_CMD}]
#utils.execute_template(*cmds, cwd=self.bin_dir,
# params=self.config_params(None))

View File

@ -43,7 +43,7 @@ class RabbitUninstaller(binstall.PkgUninstallComponent):
self.runtime.start()
self.runtime.wait_active()
cmd = self.distro.get_command('rabbit-mq', 'change_password') + [RESET_BASE_PW]
sh.execute(*cmd, run_as_root=True)
sh.execute(cmd)
LOG.info("Restarting so that your rabbit-mq password is reflected.")
self.runtime.restart()
self.runtime.wait_active()
@ -67,7 +67,7 @@ class RabbitInstaller(binstall.PkgInstallComponent):
self.runtime.wait_active()
cmd = list(self.distro.get_command('rabbit-mq', 'change_password'))
cmd += [user_id, rhelper.get_shared_passwords(self)['pw']]
sh.execute(*cmd, run_as_root=True)
sh.execute(cmd)
LOG.info("Restarting so that your rabbit-mq password is reflected.")
self.runtime.restart()
self.runtime.wait_active()
@ -123,7 +123,7 @@ class RabbitRuntime(bruntime.ProgramRuntime):
# RHEL seems to have this bug also...
with TemporaryFile() as s_fh:
with TemporaryFile() as e_fh:
sh.execute(*cmd, run_as_root=True,
sh.execute(cmd,
stdout_fh=s_fh, stderr_fh=e_fh,
check_exit_code=check_exit_code)
# Read from the file handles instead of the typical output...

View File

@ -65,8 +65,7 @@ class DBInstaller(db.DBInstaller):
new_lines.append('bind-address = 0.0.0.0')
else:
new_lines.append(line)
with sh.Rooted(True):
sh.write_file_and_backup(DBInstaller.MYSQL_CONF, utils.joinlinesep(*new_lines))
sh.write_file_and_backup(DBInstaller.MYSQL_CONF, utils.joinlinesep(*new_lines))
class HorizonInstaller(horizon.HorizonInstaller):
@ -92,8 +91,7 @@ class HorizonInstaller(horizon.HorizonInstaller):
if re.match(r"^\s*Listen\s+(.*)$", line, re.I):
line = "Listen 0.0.0.0:80"
new_lines.append(line)
with sh.Rooted(True):
sh.write_file_and_backup(HorizonInstaller.HTTPD_CONF, utils.joinlinesep(*new_lines))
sh.write_file_and_backup(HorizonInstaller.HTTPD_CONF, utils.joinlinesep(*new_lines))
def _config_fixups(self):
self._config_fix_httpd()
@ -118,11 +116,10 @@ class RabbitRuntime(rabbit.RabbitRuntime):
# And not trying to run this service directly...
base_dir = sh.joinpths("/var/log", 'rabbitmq')
if sh.isdir(base_dir):
with sh.Rooted(True):
# Seems like we need root perms to list that directory...
for fn in sh.listdir(base_dir):
if re.match("(.*?)(err|log)$", fn, re.I):
sh.chmod(sh.joinpths(base_dir, fn), 0666)
# Seems like we need root perms to list that directory...
for fn in sh.listdir(base_dir):
if re.match("(.*?)(err|log)$", fn, re.I):
sh.chmod(sh.joinpths(base_dir, fn), 0666)
def start(self):
self._fix_log_dir()
@ -155,11 +152,10 @@ class NovaInstaller(nova.NovaInstaller):
# Create a libvirtd user group
if not sh.group_exists('libvirtd'):
cmd = ['groupadd', 'libvirtd']
sh.execute(*cmd, run_as_root=True)
sh.execute(cmd)
if not sh.isfile(LIBVIRT_POLICY_FN):
contents = self._get_policy(self._get_policy_users())
with sh.Rooted(True):
sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
sh.write_file(LIBVIRT_POLICY_FN, contents)
sh.mkdirslist(sh.dirname(LIBVIRT_POLICY_FN))
sh.write_file(LIBVIRT_POLICY_FN, contents)
configs_made += 1
return configs_made

View File

@ -76,13 +76,13 @@ class GitDownloader(Downloader):
LOG.info("Existing git directory located at %s, leaving it alone.", colorizer.quote(self.store_where))
# do git clean -xdfq and git reset --hard to undo possible changes
cmd = ["git", "clean", "-xdfq"]
sh.execute(*cmd, cwd=self.store_where)
sh.execute(cmd, cwd=self.store_where)
cmd = ["git", "reset", "--hard"]
sh.execute(*cmd, cwd=self.store_where)
sh.execute(cmd, cwd=self.store_where)
else:
LOG.info("Downloading %s (%s) to %s.", colorizer.quote(uri), branch, colorizer.quote(self.store_where))
cmd = ["git", "clone", uri, self.store_where]
sh.execute(*cmd)
sh.execute(cmd)
if tag:
LOG.info("Adjusting to tag %s.", colorizer.quote(tag))
else:
@ -91,13 +91,13 @@ class GitDownloader(Downloader):
# newer git allows branch resetting: git checkout -B $new_branch
# so, all these are for compatibility with older RHEL git
cmd = ["git", "rev-parse", "HEAD"]
git_head = sh.execute(*cmd, cwd=self.store_where)[0].strip()
git_head = sh.execute(cmd, cwd=self.store_where)[0].strip()
cmd = ["git", "checkout", git_head]
sh.execute(*cmd, cwd=self.store_where)
sh.execute(cmd, cwd=self.store_where)
cmd = ["git", "branch", "-D", new_branch]
sh.execute(*cmd, cwd=self.store_where, ignore_exit_code=True)
sh.execute(cmd, cwd=self.store_where, check_exit_code=False)
cmd = ["git", "checkout"] + checkout_what
sh.execute(*cmd, cwd=self.store_where)
sh.execute(cmd, cwd=self.store_where)
class UrlLibDownloader(Downloader):

View File

@ -103,7 +103,7 @@ class DependencyHandler(object):
python_names = []
for pkg_dir in package_dirs:
cmdline = ["python", "setup.py", "--name"]
python_names.append(sh.execute(*cmdline, cwd=pkg_dir)[0].
python_names.append(sh.execute(cmdline, cwd=pkg_dir)[0].
splitlines()[-1].strip())
return python_names
@ -171,7 +171,7 @@ class DependencyHandler(object):
]
cmdline = cmdline + extra_pips + ["-r"] + requires_files
output = sh.execute(*cmdline, ignore_exit_code=True)
output = sh.execute(cmdline, check_exit_code=False)
conflict_descr = output[1].strip()
forced_keys = set()
if conflict_descr:
@ -252,5 +252,5 @@ class DependencyHandler(object):
LOG.info("You can watch progress in another terminal with")
LOG.info(" tail -f %s" % out_filename)
with open(out_filename, "w") as out:
sh.execute(*cmdline, stdout_fh=out, stderrr_fh=out)
sh.execute(cmdline, stdout_fh=out, stderr_fh=out)
return sh.listdir(self.download_dir, files_only=True)

View File

@ -72,7 +72,7 @@ class GitChangeLog(object):
def _get_commit_detail(self, commit, field, am=1):
detail_cmd = ['git', 'log', '--color=never', '-%s' % (am), "--pretty=format:%s" % (field), commit]
(stdout, _stderr) = sh.execute(*detail_cmd, cwd=self.wkdir)
(stdout, _stderr) = sh.execute(detail_cmd, cwd=self.wkdir)
ret = stdout.strip('\n').splitlines()
if len(ret) == 1:
ret = ret[0]
@ -106,7 +106,7 @@ class GitChangeLog(object):
def _get_log(self):
log_cmd = ['git', 'log', '--pretty=oneline', '--color=never']
(sysout, _stderr) = sh.execute(*log_cmd, cwd=self.wkdir)
(sysout, _stderr) = sh.execute(log_cmd, cwd=self.wkdir)
lines = sysout.strip('\n').splitlines()
# Extract the raw commit details

View File

@ -82,7 +82,7 @@ class Helper(object):
def _list_installed(self):
cmd = [self._pip_how] + FREEZE_CMD
(stdout, _stderr) = sh.execute(*cmd, run_as_root=True)
(stdout, _stderr) = sh.execute(cmd)
return parse_requirements(stdout, True)
def uncache(self):

View File

@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from anvil import shell as sh
# See http://yum.baseurl.org/api/yum-3.2.26/yum-module.html
from yum import YumBase
@ -53,11 +51,8 @@ class Helper(object):
@staticmethod
def _get_yum_base():
if Helper._yum_base is None:
# This 'root' seems needed...
# otherwise 'cannot open Packages database in /var/lib/rpm' starts to happen
with sh.Rooted(True):
_yum_base = YumBase()
_yum_base.setCacheDir(force=True)
_yum_base = YumBase()
_yum_base.setCacheDir(force=True)
Helper._yum_base = _yum_base
return Helper._yum_base
@ -69,23 +64,17 @@ class Helper(object):
def get_available(self):
base = Helper._get_yum_base()
with sh.Rooted(True):
pkgs = base.doPackageLists()
avail = list(pkgs.available)
avail.extend(pkgs.installed)
return avail
pkgs = base.doPackageLists()
avail = list(pkgs.available)
avail.extend(pkgs.installed)
return avail
def get_installed(self, name):
base = Helper._get_yum_base()
# This 'root' seems needed...
# otherwise 'cannot open Packages database in /var/lib/rpm' starts to happen
# even though we are just doing a read-only operation, which
# is pretty odd...
with sh.Rooted(True):
pkgs = base.doPackageLists(pkgnarrow='installed',
ignore_case=True, patterns=[name])
if pkgs.installed:
whats_installed = list(pkgs.installed)
else:
whats_installed = []
pkgs = base.doPackageLists(pkgnarrow='installed',
ignore_case=True, patterns=[name])
if pkgs.installed:
whats_installed = list(pkgs.installed)
else:
whats_installed = []
return whats_installed

View File

@ -118,7 +118,7 @@ class YumDependencyHandler(base.DependencyHandler):
Version: %s.%s.%s
Release: 0
License: Apache 2.0
Summary: Python dependencies for OpenStack
Summary: OpenStack dependencies
BuildArch: noarch
""" % (self.OPENSTACK_DEPS_PACKAGE_NAME, today.year, today.month, today.day)
@ -140,7 +140,13 @@ BuildArch: noarch
}
for pack_name in sorted(packages.iterkeys()):
pack = packages[pack_name]
spec_content += "Requires: %s\n" % pack["name"]
cont = [spec_content, "Requires: ", pack["name"]]
version = pack.get("version")
if version:
cont.append(" ")
cont.append(version)
cont.append("\n")
spec_content = "".join(cont)
for script_name in script_map.iterkeys():
try:
script_list = pack[script_name]
@ -186,7 +192,7 @@ BuildArch: noarch
spec_filename,
]
LOG.info("Building %s RPM" % self.OPENSTACK_DEPS_PACKAGE_NAME)
sh.execute(*cmdline)
sh.execute(cmdline)
def _build_dependencies(self):
package_files = self.download_dependencies()
@ -205,7 +211,7 @@ BuildArch: noarch
LOG.info(" tail -f %s" % out_filename)
with open(out_filename, "w") as out:
try:
sh.execute(*cmdline, stdout_fh=out, stderr_fh=out)
sh.execute(cmdline, stdout_fh=out, stderr_fh=out)
except excp.ProcessExecutionError:
LOG.error("Some packages failed to build.")
LOG.error("That's usually not a big deal,"
@ -223,7 +229,7 @@ BuildArch: noarch
LOG.info("You can watch progress in another terminal with")
LOG.info(" tail -f %s" % out_filename)
with open(out_filename, "w") as out:
sh.execute(*cmdline, stdout_fh=out, stderr_fh=out)
sh.execute(cmdline, stdout_fh=out, stderr_fh=out)
def _create_deps_repo(self):
for filename in sh.listdir(sh.joinpths(self.rpmbuild_dir, "RPMS"),
@ -235,7 +241,7 @@ BuildArch: noarch
for repo_dir in self.deps_repo_dir, self.deps_src_repo_dir:
cmdline = ["createrepo", repo_dir]
LOG.info("Creating repo at %s" % repo_dir)
sh.execute(*cmdline)
sh.execute(cmdline)
LOG.info("Writing anvil.repo to %s" % self.anvil_repo_filename)
(_fn, content) = utils.load_template('packaging', 'anvil.repo')
params = {"baseurl_bin": "file://%s" % self.deps_repo_dir,
@ -249,8 +255,7 @@ BuildArch: noarch
cmdline = [self.py2rpm_executable, "--convert"] + python_names
rpm_names = []
# run as root since /tmp/pip-build-root must be owned by root
for name in sh.execute(*cmdline, run_as_root=True)[0].splitlines():
for name in sh.execute(cmdline)[0].splitlines():
# name is "Requires: rpm-name"
try:
rpm_names.append(name.split(":")[1].strip())
@ -264,8 +269,7 @@ BuildArch: noarch
# Ensure we copy the local repo file name to the main repo so that
# yum will find it when installing packages.
with sh.Rooted(True):
sh.copy(self.anvil_repo_filename, "/etc/yum.repos.d/")
sh.copy(self.anvil_repo_filename, "/etc/yum.repos.d/")
cmdline = []
if helper.is_installed(self.OPENSTACK_DEPS_PACKAGE_NAME):
@ -277,21 +281,18 @@ BuildArch: noarch
if cmdline:
cmdline = ["yum", "erase", "-y"] + cmdline
sh.execute(*cmdline, run_as_root=True, ignore_exit_code=True,
stdout_fh=sys.stdout, stderr_fh=sys.stderr)
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)
cmdline = ["yum", "clean", "all"]
sh.execute(*cmdline, run_as_root=True)
sh.execute(cmdline)
cmdline = ["yum", "install", "-y", self.OPENSTACK_DEPS_PACKAGE_NAME]
sh.execute(*cmdline, run_as_root=True,
stdout_fh=sys.stdout, stderr_fh=sys.stderr)
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)
rpm_names = self._convert_names_python2rpm(self.python_names)
if rpm_names:
cmdline = ["yum", "install", "-y"] + rpm_names
sh.execute(*cmdline, run_as_root=True,
stdout_fh=sys.stdout, stderr_fh=sys.stderr)
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)
def uninstall(self):
super(YumDependencyHandler, self).uninstall()
@ -303,5 +304,4 @@ BuildArch: noarch
if rpm_names:
cmdline = ["yum", "remove", "--remove-leaves", "-y"] + rpm_names
sh.execute(*cmdline, run_as_root=True,
stdout_fh=sys.stdout, stderr_fh=sys.stderr)
sh.execute(cmdline, stdout_fh=sys.stdout, stderr_fh=sys.stderr)

View File

@ -61,4 +61,4 @@ def apply_patches(patch_files, working_dir):
LOG.debug("Applying patch %s in directory %s", p, working_dir)
patch_contents = sh.load_file(p)
if len(patch_contents):
sh.execute(*PATCH_CMD, process_input=patch_contents)
sh.execute(PATCH_CMD, process_input=patch_contents)

View File

@ -80,37 +80,36 @@ class ForkRunner(base.Runner):
if not sh.isdir(trace_dir):
msg = "No trace directory found from which to stop: %r" % (app_name)
raise excp.StopException(msg)
with sh.Rooted(True):
fork_fns = self._form_file_names(app_name)
skip_kill = True
pid = None
try:
pid = fork_fns.extract_pid()
fork_fns = self._form_file_names(app_name)
skip_kill = True
pid = None
try:
pid = fork_fns.extract_pid()
skip_kill = False
except IOError as e:
if e.errno == errno.ENOENT:
pass
else:
skip_kill = False
except IOError as e:
if e.errno == errno.ENOENT:
pass
else:
skip_kill = False
if not skip_kill and pid is None:
msg = "Could not extract a valid pid from %r" % (fork_fns.pid)
raise excp.StopException(msg)
# Bother trying to kill said process?
if not skip_kill and pid is None:
msg = "Could not extract a valid pid from %r" % (fork_fns.pid)
raise excp.StopException(msg)
# Bother trying to kill said process?
if not skip_kill:
(killed, attempts) = sh.kill(pid)
else:
(killed, attempts) = (True, 0)
# Trash the files if it worked
if killed:
if not skip_kill:
(killed, attempts) = sh.kill(pid)
else:
(killed, attempts) = (True, 0)
# Trash the files if it worked
if killed:
if not skip_kill:
LOG.debug("Killed pid '%s' after %s attempts.", pid, attempts)
for leftover_fn in fork_fns.as_list():
if sh.exists(leftover_fn):
LOG.debug("Removing forking related file %r", (leftover_fn))
sh.unlink(leftover_fn)
else:
msg = "Could not stop %r after %s attempts" % (app_name, attempts)
raise excp.StopException(msg)
LOG.debug("Killed pid '%s' after %s attempts.", pid, attempts)
for leftover_fn in fork_fns.as_list():
if sh.exists(leftover_fn):
LOG.debug("Removing forking related file %r", (leftover_fn))
sh.unlink(leftover_fn)
else:
msg = "Could not stop %r after %s attempts" % (app_name, attempts)
raise excp.StopException(msg)
def status(self, app_name):
# Attempt to find the status of a given app by finding where that apps
@ -173,8 +172,7 @@ class ForkRunner(base.Runner):
if v is not None:
run_trace.trace(k, v)
LOG.debug("Forking %r by running command %r with args (%s)" % (app_name, app_pth, " ".join(args)))
with sh.Rooted(True):
sh.fork(app_pth, app_wkdir, fork_fns.pid, fork_fns.stdout, fork_fns.stderr, *args)
sh.fork(app_pth, app_wkdir, fork_fns.pid, fork_fns.stdout, fork_fns.stderr, *args)
return trace_fn
def _post_start(self, app_name):

View File

@ -14,6 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
# R0915: Too many statements
# pylint: disable=R0915
import distutils.spawn
import getpass
import grp
@ -50,23 +53,6 @@ class Process(psutil.Process):
return "%s (%s)" % (self.pid, self.name)
class Rooted(object):
def __init__(self, run_as_root):
self.root_mode = run_as_root
self.engaged = False
def __enter__(self):
if self.root_mode and not got_root():
root_mode()
self.engaged = True
return self.engaged
def __exit__(self, type, value, traceback):
if self.root_mode and self.engaged:
user_mode()
self.engaged = False
def set_dry_run(on_off):
global IS_DRYRUN
if not isinstance(on_off, (bool)):
@ -81,50 +67,44 @@ def is_dry_run():
# Originally borrowed from nova computes execute...
def execute(*cmd, **kwargs):
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_overrides = kwargs.pop('env_overrides', None)
ignore_exit_code = kwargs.pop('ignore_exit_code', False)
def execute(cmd,
process_input=None,
check_exit_code=True,
cwd=None,
shell=False,
env_overrides=None,
stdout_fh=None,
stderr_fh=None,
stdout_fn=None,
stderr_fn=None,
trace_writer=None):
"""Helper method to execute command.
:param cmd: Passed to subprocess.Popen
:param process_input: Send to opened process
:param check_exit_code: Single `bool`, `int`, or `list` of allowed exit
codes. By default, only 0 exit code is allowed.
Raise :class:`exceptions.ProcessExecutionError`
unless program exits with one of these code
:returns: a tuple, (stdout, stderr) from the spawned process, or None if
the command fails
"""
if isinstance(check_exit_code, (bool)):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, (int)):
check_exit_code = [check_exit_code]
run_as_root = kwargs.pop('run_as_root', False)
shell = kwargs.pop('shell', False)
# Ensure all string args (ie for those that send ints and such...)
execute_cmd = [str(c) for c in cmd]
# From the docs it seems a shell command must be a string??
# TODO(harlowja) this might not really be needed?
str_cmd = " ".join(execute_cmd)
str_cmd = " ".join(shellquote(word) for word in cmd)
if shell:
execute_cmd = str_cmd.strip()
stdin_fh = subprocess.PIPE
stdout_fh = subprocess.PIPE
stderr_fh = subprocess.PIPE
stdout_fn = kwargs.get('stdout_fn')
stderr_fn = kwargs.get('stderr_fn')
trace_writer = kwargs.get('tracewriter')
if 'stdout_fh' in kwargs:
stdout_fh = kwargs['stdout_fh']
if stdout_fn:
LOG.warn("Stdout file handles and stdout file names can not be used simultaneously!")
stdout_fn = None
if 'stderr_fh' in kwargs:
stderr_fh = kwargs['stderr_fh']
if stderr_fn:
LOG.warn("Stderr file handles and stderr file names can not be used simultaneously!")
stderr_fn = None
execute_cmd = str_cmd
if not shell:
LOG.debug('Running cmd: %r' % (execute_cmd))
@ -136,56 +116,48 @@ def execute(*cmd, **kwargs):
if cwd:
LOG.debug("In working directory: %r" % (cwd))
if stdout_fn is not None and stdout_fh is not None:
LOG.warn("Stdout file handles and stdout file names can not be used simultaneously!")
if stderr_fn is not None and stderr_fh is not None:
LOG.warn("Stderr file handles and stderr file names can not be used simultaneously!")
process_env = None
if env_overrides and len(env_overrides):
process_env = env.get()
for (k, v) in env_overrides.items():
process_env[k] = str(v)
demoter = None
def demoter_functor(user_uid, user_gid):
def doit():
os.setregid(user_gid, user_gid)
os.setreuid(user_uid, user_uid)
return doit
if not run_as_root:
# Ensure we drop down to the suid user before the command
# is executed (ensuring we don't run in root mode when we
# should not be)
(user_uid, user_gid) = get_suids()
if user_uid is not None and user_gid is not None:
demoter = demoter_functor(user_uid=user_uid, user_gid=user_gid)
rc = None
result = None
with Rooted(run_as_root):
if is_dry_run():
rc = 0
result = ('', '')
else:
try:
obj = subprocess.Popen(execute_cmd, stdin=stdin_fh, stdout=stdout_fh, stderr=stderr_fh,
close_fds=True, cwd=cwd, shell=shell,
preexec_fn=demoter, env=process_env)
if process_input is not None:
result = obj.communicate(str(process_input))
else:
result = obj.communicate()
except OSError as e:
raise excp.ProcessExecutionError(description="%s: [%s, %s]" % (e, e.errno, e.strerror),
cmd=str_cmd)
rc = obj.returncode
result = ("", "")
if is_dry_run():
rc = 0
else:
stdin_fh = subprocess.PIPE
if stdout_fn or (stdout_fh is None):
stdout_fh = subprocess.PIPE
if stderr_fn or (stderr_fh is None):
stderr_fh = subprocess.PIPE
try:
obj = subprocess.Popen(execute_cmd, stdin=stdin_fh, stdout=stdout_fh, stderr=stderr_fh,
close_fds=True, cwd=cwd, shell=shell,
env=process_env)
if process_input is not None:
result = obj.communicate(str(process_input))
else:
result = obj.communicate()
except OSError as e:
raise excp.ProcessExecutionError(description="%s: [%s, %s]" % (e, e.errno, e.strerror),
cmd=str_cmd)
rc = obj.returncode
if not result:
result = ("", "")
(stdout, stderr) = result
if stdout is None:
stdout = ''
if stderr is None:
stderr = ''
if stdout_fh != subprocess.PIPE:
stdout = "<redirected to %s>" % (stdout_fn or stdout_fh)
else:
stdout = result[0] or ""
if stderr_fh != subprocess.PIPE:
stderr = "<redirected to %s>" % (stderr_fn or stderr_fh)
else:
stderr = result[1] or ""
if (not ignore_exit_code) and (rc not in check_exit_code):
raise excp.ProcessExecutionError(exit_code=rc, stdout=stdout,
@ -196,14 +168,11 @@ def execute(*cmd, **kwargs):
LOG.debug("A failure may of just happened when running command %r [%s] (%s, %s)",
str_cmd, rc, stdout, stderr)
# See if a requested storage place was given for stderr/stdout
if stdout_fn:
write_file(stdout_fn, stdout)
if trace_writer:
trace_writer.file_touched(stdout_fn)
if stderr_fn:
write_file(stderr_fn, stderr)
if trace_writer:
trace_writer.file_touched(stderr_fn)
for name, handle in ((stdout_fn, stdout), (stderr_fn, stderr)):
if name:
write_file(name, handle)
if trace_writer:
trace_writer.file_touched(name)
return (stdout, stderr)
@ -300,7 +269,7 @@ def get_suids():
return (uid, gid)
def chown(path, uid, gid, run_as_root=True):
def chown(path, uid, gid):
if uid is None:
uid = -1
if gid is None:
@ -308,23 +277,21 @@ def chown(path, uid, gid, run_as_root=True):
if uid == -1 and gid == -1:
return 0
LOG.debug("Changing ownership of %r to %s:%s" % (path, uid, gid))
with Rooted(run_as_root):
if not is_dry_run():
os.chown(path, uid, gid)
if not is_dry_run():
os.chown(path, uid, gid)
return 1
def chown_r(path, uid, gid, run_as_root=True):
def chown_r(path, uid, gid):
changed = 0
with Rooted(run_as_root):
for (root, dirs, files) in os.walk(path):
changed += chown(root, uid, gid)
for d in dirs:
dir_pth = joinpths(root, d)
changed += chown(dir_pth, uid, gid)
for f in files:
fn_pth = joinpths(root, f)
changed += chown(fn_pth, uid, gid)
for (root, dirs, files) in os.walk(path):
changed += chown(root, uid, gid)
for d in dirs:
dir_pth = joinpths(root, d)
changed += chown(dir_pth, uid, gid)
for f in files:
fn_pth = joinpths(root, f)
changed += chown(fn_pth, uid, gid)
return changed
@ -452,12 +419,12 @@ def is_running(pid):
return False
def mkdirslist(path, tracewriter=None, adjust_suids=False):
def mkdirslist(path, tracewriter=None):
dirs_possible = explode_path(path)
dirs_made = []
for dir_path in dirs_possible:
if not isdir(dir_path):
mkdir(dir_path, recurse=False, adjust_suids=adjust_suids)
mkdir(dir_path, recurse=False)
if tracewriter:
tracewriter.dirs_made(dir_path)
dirs_made.append(dir_path)
@ -514,7 +481,7 @@ def load_file(fn):
return data
def mkdir(path, recurse=True, adjust_suids=False):
def mkdir(path, recurse=True):
if not isdir(path):
if recurse:
LOG.debug("Recursively creating directory %r" % (path))
@ -524,30 +491,24 @@ def mkdir(path, recurse=True, adjust_suids=False):
LOG.debug("Creating directory %r" % (path))
if not is_dry_run():
os.mkdir(path)
if adjust_suids:
(uid, gid) = get_suids()
if uid is not None and gid is not None:
chown_r(path, uid, gid)
return path
def deldir(path, run_as_root=False):
with Rooted(run_as_root):
if isdir(path):
LOG.debug("Recursively deleting directory tree starting at %r" % (path))
if not is_dry_run():
shutil.rmtree(path)
def deldir(path):
if isdir(path):
LOG.debug("Recursively deleting directory tree starting at %r" % (path))
if not is_dry_run():
shutil.rmtree(path)
def rmdir(path, quiet=True, run_as_root=False):
def rmdir(path, quiet=True):
if not isdir(path):
return
try:
with Rooted(run_as_root):
LOG.debug("Deleting directory %r with the cavet that we will fail if it's not empty." % (path))
if not is_dry_run():
os.rmdir(path)
LOG.debug("Deleted directory %r" % (path))
LOG.debug("Deleting directory %r with the cavet that we will fail if it's not empty." % (path))
if not is_dry_run():
os.rmdir(path)
LOG.debug("Deleted directory %r" % (path))
except OSError:
if not quiet:
raise
@ -555,16 +516,15 @@ def rmdir(path, quiet=True, run_as_root=False):
pass
def symlink(source, link, force=True, run_as_root=True, tracewriter=None):
with Rooted(run_as_root):
LOG.debug("Creating symlink from %r => %r" % (link, source))
mkdirslist(dirname(link), tracewriter=tracewriter)
if not is_dry_run():
if force and (exists(link) and islink(link)):
unlink(link, True)
os.symlink(source, link)
if tracewriter:
tracewriter.symlink_made(link)
def symlink(source, link, force=True, tracewriter=None):
LOG.debug("Creating symlink from %r => %r" % (link, source))
mkdirslist(dirname(link), tracewriter=tracewriter)
if not is_dry_run():
if force and (exists(link) and islink(link)):
unlink(link, True)
os.symlink(source, link)
if tracewriter:
tracewriter.symlink_made(link)
def exists(path):
@ -632,12 +592,11 @@ def getgroupname():
return grp.getgrgid(gid).gr_name
def unlink(path, ignore_errors=True, run_as_root=False):
def unlink(path, ignore_errors=True):
LOG.debug("Unlinking (removing) %r" % (path))
if not is_dry_run():
try:
with Rooted(run_as_root):
os.unlink(path)
os.unlink(path)
except OSError:
if not ignore_errors:
raise
@ -718,26 +677,6 @@ def root_mode(quiet=True):
raise excp.PermException(msg)
def user_mode(quiet=True):
(sudo_uid, sudo_gid) = get_suids()
if sudo_uid is not None and sudo_gid is not None:
try:
os.setregid(0, sudo_gid)
os.setreuid(0, sudo_uid)
except OSError as e:
msg = "Cannot drop permissions to (uid=%s, gid=%s): %s" % (sudo_uid, sudo_gid, e)
if quiet:
LOG.warn(msg)
else:
raise excp.PermException(msg)
else:
msg = "Can not switch to user mode, no suid user id or suid group id"
if quiet:
LOG.warn(msg)
else:
raise excp.PermException(msg)
def is_executable(fn):
return isfile(fn) and isuseable(fn, options=os.X_OK)

View File

@ -286,10 +286,10 @@ def execute_template(cmd, *cmds, **kargs):
stdin_tpl = [stdin_tpl]
stdin = [expand_template(c, params) for c in stdin_tpl]
stdin = "\n".join(stdin)
result = sh.execute(*run_what,
run_as_root=info.get('run_as_root', False),
result = sh.execute(run_what,
process_input=stdin,
ignore_exit_code=info.get('ignore_failure', False),
check_exit_code=not info.get(
'ignore_failure', False),
**kargs)
results.append(result)
return results

View File

@ -322,14 +322,12 @@ components:
- qpidd
- stop
ignore_failure: true
run_as_root: true
# Also stop it from starting on boot (if rebooted)
- cmd:
- chkconfig
- qpidd
- 'off'
ignore_failure: true
run_as_root: true
swift-client:
action_classes:
install: anvil.components.base_install:PythonInstallComponent