Deprecate EnablePaunch and remove Paunch support

Paunch was deprecated in Ussuri and is now being retired, to be fully
replaced by the new tripleo-ansible role, tripleo_container_manage.

This patch:

- Removes common/container-puppet.py (it was only useful when paunch was
  enabled, since that script has been converted into the
  container_puppet_config Ansible module in tripleo-ansible).
- Updates all comments referring to paunch, replacing them with
  tripleo_container_manage.
- Deprecates the EnablePaunch parameter.
- Removes paunch from the Python dependencies.

Depends-On: https://review.opendev.org/#/c/731545/
Change-Id: I9294677fa18a7efc61898a25103414c8191d8805
Emilien Macchi 2020-05-28 11:18:54 -04:00
parent c8392e39df
commit 3a00c029f2
21 changed files with 36 additions and 750 deletions


@@ -1,576 +0,0 @@
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shell script tool to run puppet inside of the given container image.
# Uses the config file at /var/lib/container-puppet/container-puppet.json
# as a source for a JSON array of
# [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
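#
# For illustration only -- each entry in that JSON array can also be written
# as a mapping, which is the form the __main__ block below understands; every
# value in this sample is made up:
#
#   {
#     "config_volume": "heat_api",
#     "puppet_tags": "heat_config,file,concat,file_line",
#     "step_config": "include ::tripleo::profile::base::heat::api",
#     "config_image": "registry.example.com/heat-api:latest",
#     "volumes": [],
#     "privileged": false
#   }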
import glob
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import tempfile
import time
from paunch import runner as containers_runner

def get_logger():
    """Return a logger object."""
    logger = logging.getLogger()
    ch = logging.StreamHandler(sys.stdout)
    if os.environ.get('DEBUG') in ['True', 'true']:
        logger.setLevel(logging.DEBUG)
        ch.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
        ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(process)s -- %(message)s'
    )
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger

def local_subprocess_call(cmd, env=None):
    """General run method for subprocess calls.

    :param cmd: list
    returns: tuple
    """
    subproc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        env=env
    )
    stdout, stderr = subproc.communicate()
    return stdout, stderr, subproc.returncode

def pull_image(name):
    _, _, rc = local_subprocess_call(cmd=[CLI_CMD, 'inspect', name])
    if rc == 0:
        LOG.info('Image already exists: %s' % name)
        return

    retval = -1
    count = 0
    LOG.info('Pulling image: %s' % name)
    while retval != 0:
        count += 1
        stdout, stderr, retval = local_subprocess_call(
            cmd=[CLI_CMD, 'pull', name]
        )
        if retval != 0:
            time.sleep(3)
            LOG.warning('%s pull failed: %s' % (CONTAINER_CLI, stderr))
            LOG.warning('retrying pulling image: %s' % name)
        if count >= 5:
            LOG.error('Failed to pull image: %s' % name)
            break

    if stdout:
        LOG.debug(stdout)
    if stderr:
        LOG.debug(stderr)

def get_config_base(prefix, volume):
    # crawl the volume's path upwards until we find the
    # volume's base, where the hashed config file resides
    path = volume
    base = prefix.rstrip(os.path.sep)
    base_generated = os.path.join(base, 'puppet-generated')
    while path.startswith(prefix):
        dirname = os.path.dirname(path)
        if dirname == base or dirname == base_generated:
            return path
        else:
            path = dirname
    raise ValueError("Could not find config's base for '%s'" % volume)

def match_config_volumes(prefix, config):
    # Match the mounted config volumes - we can't just use the
    # key as e.g "novacomute" consumes config-data/nova
    try:
        volumes = config.get('volumes', [])
    except AttributeError:
        LOG.error(
            'Error fetching volumes. Prefix: %s - Config: %s' % (
                prefix,
                config
            )
        )
        raise
    return sorted([get_config_base(prefix, v.split(":")[0])
                   for v in volumes if v.startswith(prefix)])

def get_config_hash(config_volume):
    hashfile = "%s.md5sum" % config_volume
    LOG.debug(
        "Looking for hashfile %s for config_volume %s" % (
            hashfile,
            config_volume
        )
    )
    hash_data = None
    if os.path.isfile(hashfile):
        LOG.debug(
            "Got hashfile %s for config_volume %s" % (
                hashfile,
                config_volume
            )
        )
        with open(hashfile) as f:
            hash_data = f.read().rstrip()
    return hash_data

def mp_puppet_config(*args):
    (
        config_volume,
        puppet_tags,
        manifest,
        config_image,
        volumes,
        privileged,
        check_mode,
        keep_container
    ) = args[0]

    LOG.info('Starting configuration of %s using image %s' %
             (config_volume, config_image))
    LOG.debug('config_volume %s' % config_volume)
    LOG.debug('puppet_tags %s' % puppet_tags)
    LOG.debug('manifest %s' % manifest)
    LOG.debug('config_image %s' % config_image)
    LOG.debug('volumes %s' % volumes)
    LOG.debug('privileged %s' % privileged)
    LOG.debug('check_mode %s' % check_mode)
    LOG.debug('keep_container %s' % keep_container)

    with tempfile.NamedTemporaryFile() as tmp_man:
        with open(tmp_man.name, 'w') as man_file:
            man_file.write('include tripleo::packages\n')
            man_file.write(manifest)

        uname = RUNNER.unique_container_name(
            'container-puppet-%s' % config_volume
        )
        LOG.info('Removing container: %s' % uname)
        RUNNER.remove_container(uname)

        pull_image(config_image)

        common_dcmd = [
            CLI_CMD,
            'run',
            '--user',
            # Using '0' and not 'root' because it seems podman is susceptible to a race condition
            # https://bugzilla.redhat.com/show_bug.cgi?id=1776766 and
            # https://bugs.launchpad.net/tripleo/+bug/1803544 which are still lurking
            # by using a UID we skip the code that parses /etc/passwd entirely and basically
            # paper over this issue
            '0',
            '--name',
            uname,
            '--env',
            'PUPPET_TAGS=%s' % puppet_tags,
            '--env',
            'NAME=%s' % config_volume,
            '--env',
            'HOSTNAME=%s' % os.environ.get('SHORT_HOSTNAME'),
            '--env',
            'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
            '--env',
            'STEP=%s' % os.environ.get('STEP', '6'),
            '--env',
            'NET_HOST=%s' % os.environ.get('NET_HOST', 'false'),
            '--env',
            'DEBUG=%s' % os.environ.get('DEBUG', 'false'),
            '--volume',
            '/etc/localtime:/etc/localtime:ro',
            '--volume',
            '%s:/etc/config.pp:ro' % tmp_man.name,
            '--volume',
            '/etc/puppet/:/tmp/puppet-etc/:ro',
            # OpenSSL trusted CA injection
            '--volume',
            '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
            '--volume',
            '/etc/pki/tls/certs/ca-bundle.crt:'
            '/etc/pki/tls/certs/ca-bundle.crt:ro',
            '--volume',
            '/etc/pki/tls/certs/ca-bundle.trust.crt:'
            '/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
            '--volume',
            '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
            '--volume',
            '%s:/var/lib/config-data/:rw' % CONFIG_VOLUME_PREFIX,
            # facter caching
            '--volume',
            '/var/lib/container-puppet/puppetlabs/facter.conf:'
            '/etc/puppetlabs/facter/facter.conf:ro',
            '--volume',
            '/var/lib/container-puppet/puppetlabs/:/opt/puppetlabs/:ro',
            # Syslog socket for puppet logs
            '--volume', '/dev/log:/dev/log:rw'
        ]

        # Remove container by default after the run
        # This should mitigate the "ghost container" issue described here
        # https://bugzilla.redhat.com/show_bug.cgi?id=1747885
        # https://bugs.launchpad.net/tripleo/+bug/1840691
        if not keep_container:
            common_dcmd.append('--rm')

        if privileged:
            common_dcmd.append('--privileged')

        if CONTAINER_CLI == 'podman':
            log_path = os.path.join(CONTAINER_LOG_STDOUT_PATH, uname)
            logging = ['--log-driver', 'k8s-file',
                       '--log-opt',
                       'path=%s.log' % log_path]
            common_dcmd.extend(logging)
        elif CONTAINER_CLI == 'docker':
            # NOTE(flaper87): Always copy the DOCKER_* environment variables as
            # they contain the access data for the docker daemon.
            for k in os.environ.keys():
                if k.startswith('DOCKER'):
                    ENV[k] = os.environ.get(k)

        common_dcmd += CLI_DCMD

        if CHECK_MODE:
            common_dcmd.extend([
                '--volume',
                '/etc/puppet/check-mode:/tmp/puppet-check-mode:ro'])

        for volume in volumes:
            if volume:
                common_dcmd.extend(['--volume', volume])

        common_dcmd.extend(['--entrypoint', SH_SCRIPT])

        if os.environ.get('NET_HOST', 'false') == 'true':
            LOG.debug('NET_HOST enabled')
            common_dcmd.extend(['--net', 'host', '--volume',
                                '/etc/hosts:/etc/hosts:ro'])
        else:
            LOG.debug('running without containers Networking')
            common_dcmd.extend(['--net', 'none'])

        # script injection as the last mount to make sure it's accessible
        # https://github.com/containers/libpod/issues/1844
        common_dcmd.extend(['--volume', '%s:%s:ro' % (SH_SCRIPT, SH_SCRIPT)])

        common_dcmd.append(config_image)

        # https://github.com/containers/libpod/issues/1844
        # This block will run "CONTAINER_CLI" run 5 times before to fail.
        retval = -1
        count = 0
        LOG.debug(
            'Running %s command: %s' % (
                CONTAINER_CLI,
                ' '.join(common_dcmd)
            )
        )
        while count < 3:
            count += 1
            stdout, stderr, retval = local_subprocess_call(
                cmd=common_dcmd,
                env=ENV
            )
            # puppet with --detailed-exitcodes will return 0 for success and
            # no changes and 2 for success and resource changes. Other
            # numbers are failures
            if retval in [0, 2]:
                if stdout:
                    LOG.debug('%s run succeeded: %s' % (common_dcmd, stdout))
                if stderr:
                    LOG.warning(stderr)
                # only delete successful runs, for debugging
                LOG.info('Removing container: %s' % uname)
                RUNNER.remove_container(uname)
                break

            time.sleep(3)
            LOG.error(
                '%s run failed after %s attempt(s): %s' % (
                    common_dcmd,
                    stderr,
                    count
                )
            )
            LOG.info('Removing container: %s' % uname)
            RUNNER.remove_container(uname)
            LOG.warning('Retrying running container: %s' % config_volume)
        else:
            if stdout:
                LOG.debug(stdout)
            if stderr:
                LOG.debug(stderr)
            LOG.error('Failed running container for %s' % config_volume)

    LOG.info(
        'Finished processing puppet configs for %s' % (
            config_volume
        )
    )
    return retval

def infile_processing(infiles):
    for infile in infiles:
        # If the JSON is already hashed, we'll skip it; and a new hashed file will
        # be created if config changed.
        if 'hashed' in infile:
            LOG.debug('%s skipped, already hashed' % infile)
            continue

        with open(infile) as f:
            infile_data = json.load(f)

        # if the contents of the file is None, we need should just create an empty
        # data set see LP#1828295
        if not infile_data:
            infile_data = {}

        c_name = os.path.splitext(os.path.basename(infile))[0]
        config_volumes = match_config_volumes(
            CONFIG_VOLUME_PREFIX,
            infile_data
        )
        config_hashes = [
            get_config_hash(volume_path) for volume_path in config_volumes
        ]
        config_hashes = filter(None, config_hashes)
        config_hash = '-'.join(config_hashes)
        if config_hash:
            LOG.debug(
                "Updating config hash for %s, hash=%s" % (
                    c_name,
                    config_hash
                )
            )
            # When python 27 support is removed, we will be able to use:
            # z = {**x, **y} to merge the dicts.
            if infile_data.get('environment', None) is None:
                infile_data['environment'] = {}
            infile_data['environment'].update(
                {'TRIPLEO_CONFIG_HASH': config_hash}
            )

        outfile = os.path.join(
            os.path.dirname(
                infile
            ), "hashed-" + os.path.basename(infile)
        )
        with open(outfile, 'w') as out_f:
            os.chmod(out_f.name, 0o600)
            json.dump(infile_data, out_f, indent=2)

if __name__ == '__main__':
    PUPPETS = (
        '/usr/share/openstack-puppet/modules/:'
        '/usr/share/openstack-puppet/modules/:ro'
    )
    SH_SCRIPT = '/var/lib/container-puppet/container-puppet.sh'
    CONTAINER_CLI = os.environ.get('CONTAINER_CLI', 'podman')
    CONTAINER_LOG_STDOUT_PATH = os.environ.get(
        'CONTAINER_LOG_STDOUT_PATH',
        '/var/log/containers/stdouts'
    )
    CLI_CMD = '/usr/bin/' + CONTAINER_CLI
    LOG = get_logger()
    LOG.info('Running container-puppet')

    CONFIG_VOLUME_PREFIX = os.path.abspath(
        os.environ.get(
            'CONFIG_VOLUME_PREFIX',
            '/var/lib/config-data'
        )
    )
    CHECK_MODE = int(os.environ.get('CHECK_MODE', 0))
    LOG.debug('CHECK_MODE: %s' % CHECK_MODE)

    if CONTAINER_CLI == 'docker':
        CLI_DCMD = ['--volume', PUPPETS]
        ENV = {}
        RUNNER = containers_runner.DockerRunner(
            'container-puppet',
            cont_cmd='docker',
            log=LOG
        )
    elif CONTAINER_CLI == 'podman':
        # podman doesn't allow relabeling content in /usr and
        # doesn't support named volumes
        CLI_DCMD = [
            '--security-opt',
            'label=disable',
            '--volume',
            PUPPETS
        ]
        # podman need to find dependent binaries that are in environment
        ENV = {'PATH': os.environ['PATH']}
        RUNNER = containers_runner.PodmanRunner(
            'container-puppet',
            cont_cmd='podman',
            log=LOG
        )
    else:
        LOG.error('Invalid CONTAINER_CLI: %s' % CONTAINER_CLI)
        raise SystemExit()

    config_file = os.environ.get(
        'CONFIG',
        '/var/lib/container-puppet/container-puppet.json'
    )
    LOG.debug('CONFIG: %s' % config_file)

    # If specified, only this config_volume will be used
    CONFIG_VOLUME_ONLY = os.environ.get('CONFIG_VOLUME', None)

    with open(config_file) as f:
        JSON_DATA = json.load(f)

    # To save time we support configuring 'shared' services at the same
    # time. For example configuring all of the heat services
    # in a single container pass makes sense and will save some time.
    # To support this we merge shared settings together here.
    #
    # We key off of config_volume as this should be the same for a
    # given group of services. We are also now specifying the container
    # in which the services should be configured. This should match
    # in all instances where the volume name is also the same.
    CONFIGS = {}
    for service in (JSON_DATA or []):
        if service is None:
            continue
        if isinstance(service, dict):
            service = [
                service.get('config_volume'),
                service.get('puppet_tags'),
                service.get('step_config'),
                service.get('config_image'),
                service.get('volumes', []),
                service.get('privileged', False),
            ]

        CONFIG_VOLUME = service[0] or ''
        PUPPET_TAGS = service[1] or ''
        MANIFEST = service[2] or ''
        CONFIG_IMAGE = service[3] or ''
        VOLUMES = service[4] if len(service) > 4 else []

        if not MANIFEST or not CONFIG_IMAGE:
            continue

        LOG.debug('config_volume %s' % CONFIG_VOLUME)
        LOG.debug('puppet_tags %s' % PUPPET_TAGS)
        LOG.debug('manifest %s' % MANIFEST)
        LOG.debug('config_image %s' % CONFIG_IMAGE)
        LOG.debug('volumes %s' % VOLUMES)
        LOG.debug('privileged %s' % service[5] if len(service) > 5 else False)

        # We key off of config volume for all configs.
        if CONFIG_VOLUME in CONFIGS:
            # Append puppet tags and manifest.
            LOG.debug("Existing service, appending puppet tags and manifest")
            if PUPPET_TAGS:
                CONFIGS[CONFIG_VOLUME][1] = '%s,%s' % (
                    CONFIGS[CONFIG_VOLUME][1],
                    PUPPET_TAGS
                )
            if MANIFEST:
                CONFIGS[CONFIG_VOLUME][2] = '%s\n%s' % (
                    CONFIGS[CONFIG_VOLUME][2],
                    MANIFEST
                )
            if CONFIGS[CONFIG_VOLUME][3] != CONFIG_IMAGE:
                LOG.warning("Config containers do not match even though"
                            " shared volumes are the same!")
            if VOLUMES:
                CONFIGS[CONFIG_VOLUME][4].extend(VOLUMES)
        else:
            if not CONFIG_VOLUME_ONLY or (CONFIG_VOLUME_ONLY == CONFIG_VOLUME):
                LOG.debug("Adding new service")
                CONFIGS[CONFIG_VOLUME] = service
            else:
                LOG.debug(
                    "Ignoring %s due to $CONFIG_VOLUME=%s" % (
                        CONFIG_VOLUME,
                        CONFIG_VOLUME_ONLY
                    )
                )

    LOG.info('Service compilation completed.')

    # Holds all the information for each process to consume.
    # Instead of starting them all linearly we run them using a process
    # pool. This creates a list of arguments for the above function
    # to consume.
    PROCESS_MAP = []
    for config_volume in CONFIGS:
        SERVICE = CONFIGS[config_volume]
        PUPPET_TAGS = SERVICE[1] or ''
        if PUPPET_TAGS:
            PUPPET_TAGS = "file,file_line,concat,augeas,cron,%s" % PUPPET_TAGS
        else:
            PUPPET_TAGS = "file,file_line,concat,augeas,cron"

        PROCESS_ITEM = [
            config_volume,
            PUPPET_TAGS,
            SERVICE[2] or '',
            SERVICE[3] or '',
            SERVICE[4] if len(SERVICE) > 4 else [],
            SERVICE[5] if len(SERVICE) > 5 else False,
            CHECK_MODE,
            SERVICE[6] if len(SERVICE) > 6 else False
        ]
        PROCESS_MAP.append(PROCESS_ITEM)
        LOG.debug('- %s' % PROCESS_ITEM)

    # Fire off processes to perform each configuration. Defaults
    # to the number of CPUs on the system.
    PROCESS = multiprocessing.Pool(int(os.environ.get('PROCESS_COUNT', 2)))
    RETURNCODES = list(PROCESS.map(mp_puppet_config, PROCESS_MAP))
    CONFIG_VOLUMES = [pm[0] for pm in PROCESS_MAP]
    SUCCESS = True
    for returncode, config_volume in zip(RETURNCODES, CONFIG_VOLUMES):
        if returncode not in [0, 2]:
            LOG.error('ERROR configuring %s' % config_volume)
            SUCCESS = False

    # Update the startup configs with the config hash we generated above
    STARTUP_CONFIGS = os.environ.get(
        'STARTUP_CONFIG_PATTERN',
        '/var/lib/tripleo-config/'
        'container-startup-config/'
        'step_' + os.environ.get('STEP', '6') + '/*.json'
    )
    LOG.debug('STARTUP_CONFIG_PATTERN: %s' % STARTUP_CONFIGS)

    # Run infile processing
    infile_processing(infiles=glob.glob(STARTUP_CONFIGS))

    if not SUCCESS:
        raise SystemExit(1)


@@ -81,11 +81,11 @@ if [ -z "$NO_ARCHIVE" ]; then
done
# On stack update, if a password was changed in a config file,
# some services (e.g. mysql) must change their internal state
# (e.g. password in mysql DB) when paunch restarts them; and
# they need the old password to achieve that.
# (e.g. password in mysql DB) when tripleo_container_manage restarts them;
# and they need the old password to achieve that.
# For those services, we update the config hash to notify
# paunch that a restart is needed, but we do not update the
# password file in container-puppet if the file already existed
# tripleo_container_manage that a restart is needed, but we do not update
# the password file in container-puppet if the file already existed
# before and let the service regenerate it instead.
password_files="/root/.my.cnf"


@@ -9,18 +9,6 @@
- name: Create /var/lib/container-puppet
no_log: True
file: path=/var/lib/container-puppet state=directory setype=container_file_t selevel=s0 recurse=true
- name: Write container-puppet.py if Paunch is enabled
no_log: True
copy: src=docker_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.py force=yes mode=0600
when:
- enable_paunch|default(false)
- name: Remove container-puppet.py if Paunch is disabled
no_log: True
file:
path: /var/lib/container-puppet/container-puppet.py
state: absent
when:
- not enable_paunch|default(false)
- name: Write container-puppet.sh
no_log: True
copy: src=container_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.sh force=yes mode=0755 setype=container_file_t


@@ -85,50 +85,7 @@
# Per step starting of the containers
#####################################
# Note container-puppet.py generates the hashed-*.json file, which is a copy of
# the *step_n.json with a hash of the generated external config added
# This acts as a salt to enable restarting the container if config changes
- name: Per step starting of the containers using Paunch
when: enable_paunch|default(true)
block:
- name: Start containers for step {{ step }} using paunch
async: 3600
poll: 0
environment:
TRIPLEO_MINOR_UPDATE: '{{ tripleo_minor_update | default(false) }}'
paunch:
config: "/var/lib/tripleo-config/container-startup-config/step_{{ step }}"
config_id: "tripleo_step{{ step }}"
action: apply
container_cli: "{{ container_cli }}"
container_log_stdout_path: "{{ container_log_stdout_path }}"
healthcheck_disabled: "{{ container_healthcheck_disabled | bool }}"
managed_by: "tripleo-{{ tripleo_role_name }}"
debug: "{{ enable_debug | bool }}"
register: start_containers_async_result
tags:
- container_startup_configs
- name: Wait for containers to start for step {{ step }} using paunch
async_status:
jid: "{{ start_containers_async_result.ansible_job_id }}"
register: start_containers_outputs
until: start_containers_outputs.finished
retries: 1200
delay: 3
tags:
- container_startup_configs
- name: "Debug output for task: Start containers for step {{ step }}"
debug:
var: start_containers_outputs.stdout_lines | default([]) | union(start_containers_outputs.stderr_lines | default([]))
when: start_containers_outputs.rc is defined
failed_when: start_containers_outputs.rc != 0
tags:
- container_startup_configs
- name: Per step starting of the containers using tripleo-ansible
when: not enable_paunch|default(true)
environment:
TRIPLEO_MINOR_UPDATE: '{{ tripleo_minor_update | default(false) }}'
block:
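
For context, the retained "using tripleo-ansible" block is truncated above at
"block:"; it starts the step's containers through the tripleo_container_manage
role. The following is only a sketch of such an invocation: the variable names
come from that role's documentation and the values are placeholders, so it
should not be read as the literal retained task list.

    - name: Start containers for step {{ step }} using tripleo-ansible
      include_role:
        name: tripleo_container_manage
      vars:
        tripleo_container_manage_config: "/var/lib/tripleo-config/container-startup-config/step_{{ step }}"
        tripleo_container_manage_config_id: "tripleo_step{{ step }}"
        tripleo_container_manage_cli: "{{ container_cli }}"
        tripleo_container_manage_debug: "{{ enable_debug | bool }}"
        tripleo_container_manage_healthcheck_disabled: "{{ container_healthcheck_disabled | bool }}"
        tripleo_container_manage_log_path: "{{ container_log_stdout_path }}"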


@@ -76,7 +76,8 @@ parameters:
type: boolean
EnablePaunch:
default: false
description: Whether to run paunch during container deployment tasks.
description: >
(DEPRECATED) Whether to run paunch during container deployment tasks.
type: boolean
DockerPuppetDebug:
type: boolean
@@ -234,6 +235,16 @@ parameters:
description: The name of the stack/plan.
type: string
parameter_groups:
- label: deprecated
description: |
The following parameters are deprecated and will be removed. They should not
be relied on for new deployments. If you have concerns regarding deprecated
parameters, please contact the TripleO development team on IRC or the
OpenStack mailing list.
parameters:
- EnablePaunch
conditions:
{% for role in enabled_roles %}
{{role.name}}NonZero:
@@ -369,7 +380,6 @@ outputs:
deploy_identifier: {get_param: DeployIdentifier}
stack_update_type: {get_param: StackUpdateType}
container_cli: {get_param: ContainerCli}
enable_paunch: {get_param: EnablePaunch}
enabled_services: {get_param: EnabledServices}
control_virtual_ip: {get_param: ControlVirtualIP}
enabled_networks: {get_param: EnabledNetworks}
@@ -412,7 +422,6 @@ outputs:
deploy_steps_tasks_step_0: {get_file: deploy-steps-tasks-step-0.yaml}
common_deploy_steps_tasks_step_1: {get_file: deploy-steps-tasks-step-1.yaml}
container_startup_configs_tasks: {get_file: container_startup_configs_tasks.yaml}
docker_puppet_script: {get_file: ./container-puppet.py}
container_puppet_script: {get_file: ./container-puppet.sh}
all_nodes_validation_script.sh : {get_file: ../validation-scripts/all-nodes.sh}
deploy-artifacts.sh : {get_file: ../puppet/deploy-artifacts.sh}
@@ -434,7 +443,6 @@ outputs:
DEPLOY_IDENTIFIER: {get_param: DeployIdentifier}
ENABLE_DEBUG: {get_param: ConfigDebug}
ENABLE_PUPPET: {get_param: EnablePuppet}
ENABLE_PAUNCH: {get_param: EnablePaunch}
CONTAINER_CLI: {get_param: ContainerCli}
CONTAINER_LOG_STDOUT_PATH: {get_param: ContainerLogStdoutPath}
CONTAINER_HEALTHCHECK_DISABLED: {get_param: ContainerHealthcheckDisabled}
@@ -1253,11 +1261,6 @@ outputs:
{%- endfor %}
- name: Create /var/lib/container-puppet
file: path=/var/lib/container-puppet state=directory setype=container_file_t selevel=s0 recurse=true
- name: Write container-puppet.py if Paunch is enabled
no_log: True
copy: src=docker_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.py force=yes mode=0600
when:
- enable_paunch|default(false)
- name: Write container-puppet.sh
no_log: True
copy: src=container_puppet_script.yaml dest=/var/lib/container-puppet/container-puppet.sh force=yes mode=0755 setype=container_file_t


@@ -1,48 +1,4 @@
- name: Block for container-puppet tasks (generate config) during step 1 with paunch
when:
- enable_paunch|default(true)
tags:
- container_config
block:
- name: Run container-puppet tasks (generate config) during step 1 with paunch
async: 3600
poll: 0
shell: "{{ python_cmd }} /var/lib/container-puppet/container-puppet.py"
environment:
NET_HOST: 'true'
DEBUG: '{{ docker_puppet_debug | bool }}'
PROCESS_COUNT: "{{ docker_puppet_process_count }}"
CONTAINER_CLI: "{{ container_cli }}"
CONFIG: '/var/lib/container-puppet/{{ ansible_check_mode | bool | ternary("check-mode/", "") }}container-puppet.json'
CONFIG_VOLUME_PREFIX: '/var/lib/config-data{{ ansible_check_mode | bool | ternary("/check-mode", "") }}'
CHECK_MODE: '{{ ansible_check_mode | bool | ternary(1, 0) }}'
STARTUP_CONFIG_PATTERN: '/var/lib/tripleo-config/container-startup-config/*/{{ ansible_check_mode | bool | ternary("check-mode/", "") }}*.json'
MOUNT_HOST_PUPPET: '{{docker_puppet_mount_host_puppet | default(true)}}'
CONTAINER_LOG_STDOUT_PATH: "{{ container_log_stdout_path }}"
CONTAINER_HEALTHCHECK_DISABLED: "{{ container_healthcheck_disabled }}"
SHORT_HOSTNAME: "{{ ansible_hostname | lower }}"
check_mode: no
register: generate_config_async_result
- name: Wait for container-puppet tasks (generate config) to finish
async_status:
jid: "{{ generate_config_async_result.ansible_job_id }}"
register: generate_config_outputs
until: generate_config_outputs.finished
retries: 1200
delay: 3
- name: "Debug output for task: Run container-puppet tasks (generate config) during step 1"
debug:
var: generate_config_outputs.stdout_lines | default([]) | union(generate_config_outputs.stderr_lines | default([]))
when:
- not (ansible_check_mode | bool)
- generate_config_outputs.rc is defined
failed_when: generate_config_outputs.rc != 0
- name: Block for container-puppet tasks (generate config) during step {{ step }} with tripleo-ansible
when:
- not enable_paunch|default(true)
tags:
- container_config
block:
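
The retained tripleo-ansible block (also cut off at "block:" above) replaces
the container-puppet.py invocation with the container_puppet_config module
from tripleo-ansible. The rough sketch below is an assumption: the option
names are based on that module's documentation and the values mirror the
environment variables removed above, so verify them against the actual task
file before relying on them.

    - name: Generate puppet configs for step 1 with tripleo-ansible
      container_puppet_config:
        check_mode: '{{ ansible_check_mode | bool | ternary(1, 0) }}'
        config_vol_prefix: '/var/lib/config-data'
        debug: '{{ docker_puppet_debug | bool }}'
        net_host: true
        no_archive: false
        puppet_config: '/var/lib/container-puppet/container-puppet.json'
        short_hostname: '{{ ansible_hostname | lower }}'
        step: '{{ step }}'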


@@ -8,48 +8,7 @@
tags:
- container_config_tasks
- name: Block for container-puppet tasks (bootstrap tasks) for step {{ step }} with paunch
when:
- enable_paunch|default(true)
tags:
- container_config_tasks
block:
- name: Run container-puppet tasks (bootstrap tasks) for step {{ step }} with paunch
async: 3600
poll: 0
shell: "{{ python_cmd }} /var/lib/container-puppet/container-puppet.py"
environment:
CONFIG: /var/lib/container-puppet/{{ ansible_check_mode | bool | ternary('check-mode/', '') }}container-puppet-tasks{{ step }}.json
CONFIG_VOLUME_PREFIX: '/var/lib/config-data{{ ansible_check_mode | bool | ternary("/check-mode", "") }}'
NET_HOST: "true"
NO_ARCHIVE: "true"
STEP: "{{ step }}"
CONTAINER_CLI: "{{ container_cli }}"
DEBUG: "{{ docker_puppet_debug }}"
MOUNT_HOST_PUPPET: '{{docker_puppet_mount_host_puppet}}'
SHORT_HOSTNAME: "{{ ansible_hostname | lower }}"
PROCESS_COUNT: "{{ docker_puppet_process_count }}"
register: bootstrap_tasks_async_result
no_log: true
- name: Wait for container-puppet tasks (bootstrap tasks) for step {{ step }} to finish
async_status:
jid: "{{ bootstrap_tasks_async_result.ansible_job_id }}"
register: bootstrap_tasks_outputs
until: bootstrap_tasks_outputs.finished
retries: 1200
delay: 3
- name: "Debug output for task: Run container-puppet tasks (bootstrap tasks) for step {{ step }}"
debug:
var: bootstrap_tasks_outputs.stdout_lines | default([]) | union(bootstrap_tasks_outputs.stderr_lines | default([]))
when:
- bootstrap_tasks_outputs.rc is defined
failed_when: bootstrap_tasks_outputs.rc != 0
- name: Block for container-puppet tasks (bootstrap tasks) for step {{ step }} with tripleo-ansible
when:
- not enable_paunch|default(true)
tags:
- container_config_tasks
block:


@@ -67,16 +67,16 @@ are available for containerized services.
* config_settings: This section contains service specific hiera data
can be used to generate config files for each service. This data
is ultimately processed via the container-puppet.py tool (in new versions
it's handled by the container_puppet_config module in tripleo-ansible) which
generates config files for each service according to the settings here.
is ultimately processed via the container_puppet_config module in
tripleo-ansible which generates config files for each service according to
the settings here.
* kolla_config: Contains YAML that represents how to map config files
into the kolla container. This config file is typically mapped into
the container itself at the /var/lib/kolla/config_files/config.json
location and drives how kolla's external config mechanisms work.
* docker_config: Data that is passed to paunch tool to configure
* docker_config: Data that is passed to tripleo_container_manage role to configure
a container, or step of containers at each step. See the available steps
documented below which are implemented by TripleO's cluster deployment
architecture. If you want the tasks executed only once for the bootstrap
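
To make the three sections above concrete, a heavily trimmed, hypothetical
service template fragment is sketched below; the service name, image and
paths are placeholders and are not taken from this patch:

  config_settings:
    example::api::bind_host: "0.0.0.0"
  kolla_config:
    /var/lib/kolla/config_files/example_api.json:
      command: /usr/bin/example-api
      config_files:
        - source: "/var/lib/kolla/config_files/src/*"
          dest: "/"
          merge: true
          preserve_properties: true
  docker_config:
    step_4:
      example_api:
        image: registry.example.com/example-api:latest
        restart: always
        volumes:
          - /var/lib/config-data/puppet-generated/example_api:/var/lib/kolla/config_files/src:ro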


@@ -924,7 +924,6 @@ outputs:
After=time-sync.target
After=virt-guest-shutdown.target
After=docker.service
After=paunch-container-shutdown.service
After=tripleo-container-shutdown.service
After=rhel-push-plugin.service
Documentation=man:libvirtd(8)


@@ -103,7 +103,7 @@ outputs:
host_prep_tasks:
upgrade_tasks:
update_tasks:
# Nothing: It's not managed by pacemaker, so let paunch do it.
# Nothing: It's not managed by pacemaker, so let tripleo_container_manage do it.
external_upgrade_tasks:
- when:
- step|int == 1


@@ -55,7 +55,7 @@ parameters:
registry_username: password
SystemdDropInDependencies:
default: true
description: tell the container manager (e.g. paunch) to inject
description: tell the tripleo_container_manage to inject
additional ordering dependencies for the systemd
scopes associated to podman containers.
type: boolean
@@ -120,15 +120,15 @@ outputs:
- if:
- systemd_drop_in_dependencies_enabled
- - name: Configure paunch to generate systemd drop-in dependencies
- - name: Configure tripleo_container_manage to generate systemd drop-in dependencies
copy:
dest: /etc/sysconfig/podman_drop_in
content: |
This file makes paunch generate additional systemd
This file makes tripleo_container_manage generate additional systemd
dependencies for containers that have special
start/stop ordering constraints. It ensures that
those constraints are enforced on reboot/shutdown.
- - name: Configure paunch to not generate drop-in dependencies
- - name: Configure tripleo_container_manage to not generate drop-in dependencies
file:
path: /etc/sysconfig/podman_drop_in
state: absent


@@ -362,6 +362,6 @@ outputs:
update_tasks:
# TODO: Are we sure we want to support this. Rolling update
# without pacemaker may fail. Do we test this ? In any case,
# this is under paunch control so the latest image should be
# this is under tripleo_container_manage control so the latest image should be
# pulled in by the deploy steps. Same question for other
# usually managed by pacemaker container.


@@ -293,6 +293,6 @@ outputs:
update_tasks:
# TODO: Are we sure we want to support this. Rolling update
# without pacemaker may fail. Do we test this ? In any case,
# this is under paunch control so the latest image should be
# this is under tripleo_container_manage control so the latest image should be
# pulled in by the deploy steps. Same question for other
# usually managed by pacemaker container.


@@ -293,6 +293,6 @@ outputs:
update_tasks:
# TODO: Are we sure we want to support this. Rolling update
# without pacemaker may fail. Do we test this ? In any case,
# this is under paunch control so the latest image should be
# this is under tripleo_container_manage control so the latest image should be
# pulled in by the deploy steps. Same question for other
# usually managed by pacemaker container.


@@ -1,5 +1,5 @@
# A Heat environment file which can be used to enable config
# management (e.g. Puppet/Paunch) debugging.
# management (e.g. Puppet/Ansible) debugging.
parameter_defaults:
ConfigDebug: true


@@ -1,6 +1,4 @@
# This heat environment can be used to disable Paunch to manage containers..
# When Paunch is disabled, the containers not managed by Pacemaker will be
# deployed by TripleO Ansible, in tripleo-container-manage role.
# This environment is deprecated as Paunch is being retired.
parameter_defaults:
EnablePaunch: false


@@ -23,4 +23,3 @@ resource_registry:
parameter_defaults:
ContainerCli: podman
ClusterCommonTag: true
EnablePaunch: false


@@ -83,7 +83,6 @@ paramiko==2.0.0
passlib==1.7.0
Paste==2.0.2
PasteDeploy==1.5.0
paunch==4.2.0
pbr==2.0.0
pecan==1.0.0
pika-pool==0.1.3


@@ -603,7 +603,7 @@ resources:
hieradata_files:
- '"%{::uuid}"'
- fqdn
- docker_puppet # Optionally provided by container-puppet.py
- docker_puppet # Optionally provided by container-puppet.sh
- ansible_managed
- heat_config_%{::deploy_config_name}
- config_step


@@ -0,0 +1,5 @@
---
deprecations:
- |
Paunch was deprecated in Ussuri and is now being retired, to be fully
replaced by the new tripleo-ansible role, tripleo_container_manage.


@@ -6,4 +6,3 @@ PyYAML>=3.12 # MIT
Jinja2>=2.10 # BSD License (3 clause)
six>=1.10.0 # MIT
tripleo-common>=7.1.0 # Apache-2.0
paunch>=4.2.0 # Apache-2.0