Add module "container_puppet_config"

This module will do two things:

Summary:

(1) Generate container configs for each container that has a puppet
    deployment step, and put the files in
    /var/lib/tripleo-config/container-puppet-config
(2) Update the container-startup-config of the containers which have a new
    hash, so they get restarted later.

Details:

(1) Here are the steps that happen to generate the puppet container
    configs:

    - Create /var/lib/tripleo-config/container-puppet-config/step_X
    - Generate a JSON file that is in the same format as the
      well-known container-startup-configs (which are understood by
      Paunch and the tripleo-container-manage Ansible role). It mimics
      the logic from THT/common/container-puppet.py to add the
      required configuration so the container can run.

(2) If a container has a new configuration, the TRIPLEO_CONFIG_HASH
    will be updated in the startup config of the container, so that Paunch
    or the tripleo-container-manage Ansible role can later restart the
    container and the new config is applied.

Note: it processes a bunch of files and data, so it's better for it to
be a module rather than an action plugin, so the file generation can be
delegated to the remote nodes instead of the undercloud.
In the future, we'll try to generate container configuration directly in
config-download so the data will be ready to be copied.
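
For illustration only (the service name, image and tag values below are
made up; actual content depends on the puppet_config input), running at
step 1 would write a file such as
/var/lib/tripleo-config/container-puppet-config/step_1/container-puppet-keystone.json
containing roughly:

    {
      "entrypoint": "/var/lib/container-puppet/container-puppet.sh",
      "image": "registry.example.com/keystone:latest",
      "net": "host",
      "user": 0,
      "environment": {
        "NAME": "keystone",
        "STEP": 6,
        "NET_HOST": "true",
        "DEBUG": "false",
        "PUPPET_TAGS": "file,file_line,concat,augeas,cron,keystone_config",
        "STEP_CONFIG": "include ::tripleo::packages\n..."
      },
      "volumes": ["/etc/puppet:/tmp/puppet-etc:ro", "..."]
    }

If the md5sum-based hash computed from the mounted config volumes differs
from the TRIPLEO_CONFIG_HASH already present in a container's startup
config under /var/lib/tripleo-config/container-startup-config/step_1/,
that startup config is rewritten with the new hash.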

Change-Id: I6b656df725803db0c1cdaac6f534766398a15810
Emilien Macchi 2020-01-13 19:41:41 -05:00
parent 769ecb6a20
commit 24e9c33ad4
5 changed files with 538 additions and 0 deletions


@@ -0,0 +1,14 @@
================================
Module - container_puppet_config
================================
This module provides for the following ansible plugin:
* container_puppet_config
.. ansibleautoplugin::
:module: tripleo_ansible/ansible_plugins/modules/container_puppet_config.py
:documentation: true
:examples: true


@@ -0,0 +1,500 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.parsing.convert_bool import boolean
from datetime import datetime
import base64
import copy
import glob
import json
import os
import shutil
import tempfile
import yaml
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: container_puppet_config
author:
- "Emilien Macchi (@EmilienM)"
version_added: '2.9'
short_description: Generate puppet container configs
notes: []
description:
  - Generate puppet container configs
requirements:
- None
options:
no_archive:
description:
- Disables config-data archiving
type: bool
default: True
check_mode:
description:
- Ansible check mode is enabled
type: bool
default: False
config_vol_prefix:
description:
- Config volume prefix
type: str
default: '/var/lib/config-data'
debug:
description:
- Enable debug
type: bool
default: False
net_host:
description:
- Using host network
type: bool
default: True
puppet_config:
description: Path to the puppet configs
type: str
short_hostname:
description:
- Short hostname
type: str
step:
description:
- Step number
default: 6
type: int
"""
EXAMPLES = """
- name: Generate puppet container config for step 1
container_puppet_config:
step: 1
    puppet_config: /var/lib/container-puppet/container-puppet.json
short_hostname: "{{ ansible_hostname }}"
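
# A second illustrative task (values are examples, not required defaults):
# run at step 3, honoring Ansible check mode and with debug output enabled.
- name: Generate puppet container config for step 3
  container_puppet_config:
    step: 3
    puppet_config: /var/lib/container-puppet/container-puppet.json
    short_hostname: "{{ ansible_hostname }}"
    check_mode: "{{ ansible_check_mode | bool }}"
    debug: true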
"""
CONTAINER_PUPPET_CONFIG = '/var/lib/tripleo-config/container-puppet-config'
CONTAINER_STARTUP_CONFIG = '/var/lib/tripleo-config/container-startup-config'
CONTAINER_ENTRYPOINT = '/var/lib/container-puppet/container-puppet.sh'
class ContainerPuppetManager:
"""Notes about this module.
    It will generate container configs that will be consumed by the
    tripleo-container-manage role, which uses the podman_container module.
"""
def __init__(self, module, results):
super(ContainerPuppetManager, self).__init__()
self.module = module
self.results = results
# parse args
args = self.module.params
# Set parameters
puppet_config = args['puppet_config']
data = json.loads(self._slurp(puppet_config))
self.step = args['step']
self.net_host = args['net_host']
self.debug = args['debug']
self.check = args['check_mode']
self.no_archive = args['no_archive']
self.config_vol_prefix = args['config_vol_prefix']
self.hostname = args['short_hostname']
config_path = os.path.join(CONTAINER_PUPPET_CONFIG,
'step_' + str(self.step))
# Make sure config_path exists
# Note: it'll cleanup old configs before creating new ones.
self._create_dir(config_path)
# Generate the container configs
config = self._get_config(self._merge_volumes_configs(data))
for k, v in config.items():
config_dest = os.path.join(config_path, k + '.json')
self._update_container_config(config_dest, v)
# Update container-startup-config with new config hashes
self._update_hashes()
self.module.exit_json(**self.results)
def _merge_volumes_configs(self, data):
"""Returns a list of puppet configs with unique config_volume keys.
:param data: list
:returns: list
This method takes in input a list of container puppet configs and
returns a list of container puppet configs with unique config_volume
keys. It will allow to run puppet for a single volume at a time and
avoid the situation where multiple configs using the same config
volume would run separately; which would cause race condition issues
because of the rsync commands executed at the end of puppet run.
To also saves time we support configuring 'shared' services at the same
time. For example configuring all of the heat services
in a single container pass makes sense and will save some time.
To support this we merge shared settings together here.
We key off of config_volume as this should be the same for a
given group of services. We are also now specifying the container
in which the services should be configured. This should match
in all instances where the volume name is also the same.
"""
returned_dict = {}
for config in data:
config_volume = config.get('config_volume')
if config_volume is None or config_volume == '':
continue
puppet_tags = config.get('puppet_tags')
step_config = config.get('step_config')
config_image = config.get('config_image')
volumes = config.get('volumes')
if config_volume in returned_dict:
# A config already exists for that config_volume,
# we'll append puppet_tags and step_config and extend volumes.
config_image_orig = (
returned_dict[config_volume]['config_image']
)
                if volumes:
                    volumes_orig = returned_dict[config_volume].get(
                        'volumes', [])
                    returned_dict[config_volume]['volumes'] = (
                        sorted(set(volumes_orig + volumes))
                    )
if puppet_tags is not None:
returned_dict[config_volume]['puppet_tags'] = '%s,%s' % (
returned_dict[config_volume]['puppet_tags'],
puppet_tags
)
if step_config is not None:
returned_dict[config_volume]['step_config'] = '%s\n%s' % (
returned_dict[config_volume]['step_config'],
step_config
)
if config_image != config_image_orig:
self.module.warn('{} config image does not match with '
'{}'.format(config_image,
config_image_orig))
else:
# This is a new config
returned_dict[config_volume] = config
return returned_dict
def _get_config(self, data):
"""Returns a list of puppet configs per container.
:param data: list
:returns: list
This method takes in input a list of dicts and returns
a dictionary which match with the podman_container module interface.
"""
returned_dict = {}
default_volumes = ['/etc/localtime:/etc/localtime:ro',
'/etc/puppet:/tmp/puppet-etc:ro',
'/etc/pki/ca-trust/extracted:'
'/etc/pki/ca-trust/extracted:ro',
'/etc/pki/tls/certs/ca-bundle.crt:'
'/etc/pki/tls/certs/ca-bundle.crt:ro',
'/etc/pki/tls/certs/ca-bundle.trust.crt:'
'/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
'/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
'%s:/var/lib/config-data'
':rw' % self.config_vol_prefix,
'/var/lib/container-puppet/puppetlabs/facter.conf:'
'/etc/puppetlabs/facter/facter.conf:ro',
'/var/lib/container-puppet/puppetlabs:'
'/opt/puppetlabs:ro',
'%s:%s:ro' % (CONTAINER_ENTRYPOINT,
CONTAINER_ENTRYPOINT),
'/usr/share/openstack-puppet/modules:'
'/usr/share/openstack-puppet/modules:ro',
'/dev/log:/dev/log:rw']
# Defaults
default_data = {
'user': 0,
'entrypoint': CONTAINER_ENTRYPOINT,
'environment': self._get_environment_config()
}
for config_volume, config in data.items():
cdata = copy.deepcopy(default_data)
volumes = copy.deepcopy(default_volumes)
cname = 'container-puppet-' + config_volume
            if self.check:
                volumes.append(
                    '/etc/puppet/check-mode:/tmp/puppet-check-mode:ro')
if self.net_host:
cdata['net'] = 'host'
volumes += ['/etc/hosts:/etc/hosts:ro']
else:
cdata['net'] = 'none'
cdata['environment']['PUPPET_TAGS'] = (
self._get_puppet_tags(config))
cdata['environment']['NAME'] = config_volume
for k, v in config.items():
if k == 'config_volume':
continue
if k == 'puppet_tags':
continue
if k == 'step_config':
step_config = 'include ::tripleo::packages\n' + v
cdata['environment']['STEP_CONFIG'] = step_config
continue
if k == 'config_image':
cdata['image'] = v
continue
if k == 'privileged':
cdata['privileged'] = v
continue
if k == 'volumes':
if isinstance(v, (list)):
volumes.extend(v)
else:
volumes += [v]
continue
# Keep this one at the very end to override any attribute:
cdata[k] = v
cdata['volumes'] = sorted(set(volumes))
returned_dict[cname] = cdata
return returned_dict
def _get_environment_config(self):
"""Returns common environment configs.
:returns: dict
"""
returned_env = {
'STEP': self._get_puppet_step(self.step),
'NET_HOST': str(self.net_host).lower(),
'DEBUG': str(self.debug).lower(),
}
if self.hostname is not None:
returned_env['HOSTNAME'] = self.hostname
if not self.no_archive:
returned_env['NO_ARCHIVE'] = ''
else:
returned_env['NO_ARCHIVE'] = self.no_archive
return returned_env
def _get_puppet_step(self, step):
"""Returns the step used by Puppet during a run."
:param step: integer
:returns: integer
"""
# When container_puppet_config is called at step1, it's to initialize
# configuration files for all services like they were deployed; so
# in Puppet it means after step5. Which is why we override the step
# just for the Puppet run.
# Note that it was the same behavior with container-puppet.py since
        # STEP was set to 6 by default and wasn't overridden when the script
# was run at step1.
if step == 1:
return 6
return step
def _get_puppet_tags(self, config):
"""Returns Puppet tags.
:returns: string
"""
puppet_tags = 'file,file_line,concat,augeas,cron'
config_puppet_tags = config.get('puppet_tags')
if config_puppet_tags is not None:
puppet_tags += ',%s' % config_puppet_tags
return puppet_tags
def _exists(self, path):
"""Returns True if a patch exists.
:param path: string
:returns: boolean
"""
        return os.path.exists(path)
def _remove_dir(self, path):
"""Remove a directory.
:param path: string
"""
if self._exists(path):
shutil.rmtree(path)
def _create_dir(self, path):
"""Creates a directory.
:param path: string
"""
if self._exists(path):
self._remove_dir(path)
os.makedirs(path)
def _find(self, path):
"""Returns a list of files in a directory.
:param path: string
:returns: list
"""
configs = []
if self._exists(path):
path = os.path.join(path, '*')
configs = glob.glob(path)
else:
            self.module.warn('{} does not exist'.format(path))
return configs
def _slurp(self, path):
"""Slurps a file and return its content.
:param path: string
:returns: string
"""
        if self._exists(path):
            with open(path, 'r') as f:
                return f.read()
        else:
            self.module.warn('{} was not found.'.format(path))
            return ''
def _update_container_config(self, path, config):
"""Update a container config.
:param path: string
:param config: string
"""
        with open(path, 'w') as f:
            f.write(json.dumps(config, indent=2))
os.chmod(path, 0o600)
def _get_config_hash(self, config_volume):
"""Returns a config hash from a config_volume.
:param config_volume: string
:returns: string
"""
hashfile = "%s.md5sum" % config_volume
hash_data = ''
if self._exists(hashfile):
return self._slurp(hashfile).strip('\n')
def _get_config_base(self, prefix, volume):
"""Returns a config base path for a specific volume.
:param prefix: string
:param volume: string
:returns: string
"""
# crawl the volume's path upwards until we find the
# volume's base, where the hashed config file resides
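        # Example (paths are illustrative): with prefix '/var/lib/config-data'
        # and volume '/var/lib/config-data/puppet-generated/keystone/etc',
        # the loop walks up to
        # '/var/lib/config-data/puppet-generated/keystone' and returns it,
        # since its parent directory is the puppet-generated base.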
path = volume
base = prefix.rstrip(os.path.sep)
base_generated = os.path.join(base, 'puppet-generated')
while path.startswith(prefix):
dirname = os.path.dirname(path)
if dirname == base or dirname == base_generated:
return path
else:
path = dirname
self.module.fail_json(
msg='Could not find config base for: {} '
'with prefix: {}'.format(volume, prefix))
def _match_config_volumes(self, config):
"""Return a list of volumes that match a config.
:param config: dict
:returns: list
"""
# Match the mounted config volumes - we can't just use the
# key as e.g "novacomute" consumes config-data/nova
prefix = self.config_vol_prefix
try:
volumes = config.get('volumes', [])
except AttributeError:
self.module.fail_json(
msg='Error fetching volumes. Prefix: '
'{} - Config: {}'.format(prefix, config))
return sorted([self._get_config_base(prefix, v.split(":")[0])
for v in volumes if v.startswith(prefix)])
def _update_hashes(self):
"""Update container startup config with new config hashes if needed.
"""
startup_config_path = os.path.join(CONTAINER_STARTUP_CONFIG,
'step_' + str(self.step))
for config in self._find(startup_config_path):
old_config_hash = ''
cname = os.path.splitext(os.path.basename(config))[0]
startup_config_json = json.loads(self._slurp(config))
config_volumes = self._match_config_volumes(startup_config_json)
config_hashes = [
self._get_config_hash(vol_path) for vol_path in config_volumes
]
            config_hashes = list(filter(None, config_hashes))
if 'environment' in startup_config_json:
old_config_hash = startup_config_json['environment'].get(
'TRIPLEO_CONFIG_HASH')
            if config_hashes:
config_hash = '-'.join(config_hashes)
if config_hash == old_config_hash:
# config doesn't need an update
continue
self.module.warn('Config change detected for {}, new '
'hash: {}'.format(config, config_hash))
if 'environment' not in startup_config_json:
startup_config_json['environment'] = {}
startup_config_json['environment']['TRIPLEO_CONFIG_HASH'] = (
config_hash)
self._update_container_config(config, startup_config_json)
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=True,
)
results = dict(
changed=False
)
ContainerPuppetManager(module, results)
if __name__ == '__main__':
main()


@@ -30,6 +30,7 @@
# cpuset_cpus: "{{ lookup('dict', container_data).value.cpuset_cpus | default(omit) }}"
debug: true
detach: "{{ lookup('dict', container_data).value.detach | default(true) }}"
entrypoint: "{{ lookup('dict', container_data).value.entrypoint | default(omit) }}"
env: "{{ lookup('dict', container_data).value.environment | default(omit) }}"
env_file: "{{ lookup('dict', container_data).value.env_file | default(omit) }}"
etc_hosts: "{{ lookup('dict', container_data).value.extra_hosts | default({}) }}"


@@ -0,0 +1,23 @@
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tripleo_ansible.ansible_plugins.modules import container_puppet_config
from tripleo_ansible.tests import base as tests_base
class TestContainerPuppetConfig(tests_base.TestCase):
def test_run(self):
# TODO(emilien) write actual tests
pass