docker-cmd hook

This hook takes the same format as the docker-compose hook, but calls
the docker command directly rather than invoking docker-compose.

This hook currently supports the docker-compose v1 format, but in the
future will support other formats such as the Kubernetes pod format.

TripleO will adopt this hook and will transition to the pod format once
the hook supports it.
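
For illustration, a config in this format maps service names to docker
run options; a minimal sketch using made-up service and image names:

    # Illustrative only: the service name, image and options are placeholders.
    config = {
        'web': {
            'image': 'nginx',
            'net': 'host',
            'environment': ['FOO=BAR'],
            'volumes': ['/run:/run'],
        }
    }
    # The hook turns each service into roughly:
    #   docker run --detach=true --name <deployment-name>__web \
    #       --env=FOO=BAR --net=host --volume=/run:/run nginx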

Co-Authored-By: Ian Main <imain@redhat.com>

Change-Id: I699107c3df64723a945c5d5ac82ae3a48b76700e
Steve Baker 2016-11-03 09:23:29 +13:00 committed by Thomas Herve
parent 6fb26a228c
commit c9408f1706
7 changed files with 576 additions and 1 deletion


@@ -0,0 +1,9 @@
A hook which uses the `docker` command to deploy containers.
The hook currently supports specifying containers in the `docker-compose v1
format <https://docs.docker.com/compose/compose-file/#/version-1>`_. The
intention is for this hook to also support the Kubernetes pod format.

A dedicated os-refresh-config script will remove running containers when a
deployment is removed or changed; the docker-cmd hook will then run the
containers defined in new or updated deployments.
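
As a minimal sketch (not part of this element), the hook can be exercised
directly by feeding it a deployment as JSON on stdin; the deployment name,
service name and image below are made up::

    import json
    import subprocess

    # Illustrative only: names and image are placeholders.
    deployment = {
        'name': 'abcdef001',
        'group': 'docker-cmd',
        'inputs': [{'name': 'deploy_action', 'value': 'CREATE'}],
        'config': {
            'web': {'image': 'nginx', 'net': 'host'},
        },
    }

    proc = subprocess.run(
        ['/var/lib/heat-config/hooks/docker-cmd'],
        input=json.dumps(deployment).encode(),
        stdout=subprocess.PIPE)
    # The hook replies with deploy_stdout, deploy_stderr and deploy_status_code.
    print(json.loads(proc.stdout))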


@@ -0,0 +1,2 @@
os-apply-config
os-refresh-config


@@ -0,0 +1,6 @@
#!/bin/bash
set -x
SCRIPTDIR=$(dirname $0)
install -D -g root -o root -m 0755 ${SCRIPTDIR}/hook-docker-cmd.py /var/lib/heat-config/hooks/docker-cmd


@@ -0,0 +1,138 @@
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import subprocess
import sys
import yaml
DOCKER_CMD = os.environ.get('HEAT_DOCKER_CMD', 'docker')
log = None
def build_response(deploy_stdout, deploy_stderr, deploy_status_code):
return {
'deploy_stdout': deploy_stdout,
'deploy_stderr': deploy_stderr,
'deploy_status_code': deploy_status_code,
}
def docker_arg_map(key, value):
value = str(value).encode('ascii', 'ignore')
return {
'container_step_config': None,
'environment': "--env=%s" % value,
'image': value,
'net': "--net=%s" % value,
'pid': "--pid=%s" % value,
        'privileged': "--privileged=%s" % ('true' if value else 'false'),
'restart': "--restart=%s" % value,
'user': "--user=%s" % value,
'volumes': "--volume=%s" % value,
'volumes_from': "--volumes-from=%s" % value,
}.get(key, None)
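# For example (illustrative): docker_arg_map('net', 'host') returns
# '--net=host' and docker_arg_map('volumes', '/run:/run') returns
# '--volume=/run:/run'; unrecognised keys map to None and are skipped by
# the caller.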
def main(argv=sys.argv):
global log
log = logging.getLogger('heat-config')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
'[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
log.addHandler(handler)
log.setLevel('DEBUG')
c = json.load(sys.stdin)
    input_values = dict((i['name'], i['value']) for i in c.get('inputs', []))
if input_values.get('deploy_action') == 'DELETE':
json.dump(build_response(
'', '', 0), sys.stdout)
return
config = c.get('config', '')
if not config:
log.debug("No 'config' input found, nothing to do.")
json.dump(build_response(
'', '', 0), sys.stdout)
return
stdout = []
stderr = []
deploy_status_code = 0
# convert config to dict
if not isinstance(config, dict):
config = yaml.safe_load(config)
for container in config:
container_name = '%s__%s' % (c['name'], container)
cmd = [
DOCKER_CMD,
'run',
'--detach=true',
'--name',
container_name.encode('ascii', 'ignore'),
]
image_name = ''
for key in sorted(config[container]):
# These ones contain a list of values
if key in ['environment', 'volumes', 'volumes_from']:
for value in config[container][key]:
# Somehow the lists get empty values sometimes
if type(value) is unicode and not value.strip():
continue
cmd.append(docker_arg_map(key, value))
elif key == 'image':
image_name = config[container][key].encode('ascii', 'ignore')
else:
arg = docker_arg_map(key, config[container][key])
if arg:
cmd.append(arg)
# Image name must come last.
cmd.append(image_name)
log.debug(' '.join(cmd))
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
log.debug(cmd_stdout)
log.debug(cmd_stderr)
if cmd_stdout:
stdout.append(cmd_stdout)
if cmd_stderr:
stderr.append(cmd_stderr)
if subproc.returncode:
log.error("Error running %s. [%s]\n" % (cmd, subproc.returncode))
else:
log.debug('Completed %s' % cmd)
if subproc.returncode != 0:
deploy_status_code = subproc.returncode
json.dump(build_response(
'\n'.join(stdout), '\n'.join(stderr), deploy_status_code), sys.stdout)
if __name__ == '__main__':
sys.exit(main(sys.argv))


@@ -0,0 +1,144 @@
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import subprocess
import sys
import yaml
CONF_FILE = os.environ.get('HEAT_SHELL_CONFIG',
'/var/run/heat-config/heat-config')
WORKING_DIR = os.environ.get(
'HEAT_DOCKER_CMD_WORKING',
'/var/lib/heat-config/heat-config-docker-cmd')
DOCKER_CMD = os.environ.get('HEAT_DOCKER_CMD', 'docker')
log = None
def main(argv=sys.argv):
global log
log = logging.getLogger('heat-config')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
'[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
log.addHandler(handler)
log.setLevel('DEBUG')
if not os.path.exists(CONF_FILE):
log.error('No config file %s' % CONF_FILE)
return 1
if not os.path.isdir(WORKING_DIR):
os.makedirs(WORKING_DIR, 0o700)
    try:
        configs = json.load(open(CONF_FILE))
    except ValueError:
        log.exception('Could not load JSON from %s' % CONF_FILE)
        return 1
cmd_configs = list(build_configs(configs))
try:
delete_missing_projects(cmd_configs)
for c in cmd_configs:
delete_changed_project(c)
write_project(c)
except Exception as e:
log.exception(e)
def build_configs(configs):
for c in configs:
if c['group'] != 'docker-cmd':
continue
if not isinstance(c['config'], dict):
# convert config to dict
c['config'] = yaml.safe_load(c['config'])
yield c
def current_projects():
for proj_file in os.listdir(WORKING_DIR):
if proj_file.endswith('.json'):
proj = proj_file[:-5]
yield proj
def remove_project(proj):
proj_file = os.path.join(WORKING_DIR, '%s.json' % proj)
with open(proj_file, 'r') as f:
proj_data = json.load(f)
for name in extract_container_names(proj, proj_data):
remove_container(name)
os.remove(proj_file)
def remove_container(name):
cmd = [DOCKER_CMD, 'rm', '-f', name]
log.debug(' '.join(cmd))
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = subproc.communicate()
log.info(stdout)
log.debug(stderr)
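# For example (illustrative): remove_container('abcdef001__web') runs
# `docker rm -f abcdef001__web` (or whatever HEAT_DOCKER_CMD points to).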
def delete_missing_projects(configs):
config_names = [c['name'] for c in configs]
for proj in current_projects():
if proj not in config_names:
log.debug('%s no longer exists, deleting containers' % proj)
remove_project(proj)
def extract_container_names(proj, proj_data):
# For now, assume a docker-compose v1 format where the
# root keys are service names
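    # e.g. a project named 'abcdef001' with services 'web' and 'db' yields
    # 'abcdef001__web' and 'abcdef001__db'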
for name in proj_data:
yield '%s__%s' % (proj, name)
def delete_changed_project(c):
proj = c['name']
proj_file = os.path.join(WORKING_DIR, '%s.json' % proj)
proj_data = c.get('config', {})
if os.path.isfile(proj_file):
with open(proj_file, 'r') as f:
prev_proj_data = json.load(f)
if proj_data != prev_proj_data:
log.debug('%s has changed, deleting containers' % proj)
remove_project(proj)
def write_project(c):
proj = c['name']
proj_file = os.path.join(WORKING_DIR, '%s.json' % proj)
proj_data = c.get('config', {})
with os.fdopen(os.open(
proj_file, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o600),
'w') as f:
json.dump(proj_data, f, indent=2)
if __name__ == '__main__':
sys.exit(main(sys.argv))


@@ -28,7 +28,16 @@ import sys
 def main(argv=sys.argv):
-    with open(os.environ.get('TEST_STATE_PATH'), 'w') as f:
+    state_path = os.environ.get('TEST_STATE_PATH')
+    # handle multiple invocations by writing to numbered state path files
+    suffix = 0
+    while os.path.isfile(state_path):
+        suffix += 1
+        state_path = '%s_%s' % (os.environ.get('TEST_STATE_PATH'), suffix)
+    with open(state_path, 'w') as f:
         json.dump({'env': dict(os.environ), 'args': argv}, f)
     if 'TEST_RESPONSE' not in os.environ:


@@ -0,0 +1,267 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import os
import tempfile
import fixtures
from testtools import matchers
from tests.software_config import common
class HookDockerComposeTest(common.RunScriptTest):
data = {
"name": "abcdef001",
"group": "docker-cmd",
"config": {
"web": {
"name": "x",
"image": "xxx"
},
"db": {
"name": "y",
"image": "xxx",
"net": "host",
"restart": "always",
"privileged": True,
"user": "root",
"volumes": [
"/run:/run",
"db:/var/lib/db"
],
"environment": [
"KOLLA_CONFIG_STRATEGY=COPY_ALWAYS",
"FOO=BAR"
]
}
}
}
def setUp(self):
super(HookDockerComposeTest, self).setUp()
self.hook_path = self.relative_path(
__file__,
'../..',
'hot/software-config/elements',
'heat-config-docker-cmd/install.d/hook-docker-cmd.py')
self.cleanup_path = self.relative_path(
__file__,
'../..',
'hot/software-config/elements/heat-config-docker-cmd/',
'os-refresh-config/configure.d/50-heat-config-docker-cmd')
self.fake_tool_path = self.relative_path(
__file__,
'config-tool-fake.py')
self.working_dir = self.useFixture(fixtures.TempDir())
self.outputs_dir = self.useFixture(fixtures.TempDir())
self.test_state_path = self.outputs_dir.join('test_state.json')
self.env = os.environ.copy()
self.env.update({
'HEAT_DOCKER_CMD_WORKING': self.working_dir.join(),
'HEAT_DOCKER_CMD': self.fake_tool_path,
'TEST_STATE_PATH': self.test_state_path,
})
def test_hook(self):
self.env.update({
'TEST_RESPONSE': json.dumps({
'stdout': '',
'stderr': 'Creating abcdef001_db_1...'
})
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual(0, returncode, stderr)
self.assertEqual({
'deploy_stdout': '',
'deploy_stderr': 'Creating abcdef001_db_1...\n'
'Creating abcdef001_db_1...',
'deploy_status_code': 0
}, json.loads(stdout))
state_0 = self.json_from_file(self.test_state_path)
state_1 = self.json_from_file('%s_1' % self.test_state_path)
self.assertEqual([
self.fake_tool_path,
'run',
'--detach=true',
'--name',
'abcdef001__web',
'xxx'
], state_0['args'])
self.assertEqual([
self.fake_tool_path,
'run',
'--detach=true',
'--name',
'abcdef001__db',
'--env=KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'--env=FOO=BAR',
'--net=host',
'--privileged=true',
'--restart=always',
'--user=root',
'--volume=/run:/run',
'--volume=db:/var/lib/db',
'xxx'
], state_1['args'])
def test_hook_failed(self):
self.env.update({
'TEST_RESPONSE': json.dumps({
'stdout': '',
'stderr': 'Error: image library/xxx:latest not found',
'returncode': 1
})
})
returncode, stdout, stderr = self.run_cmd(
[self.hook_path], self.env, json.dumps(self.data))
self.assertEqual({
'deploy_stdout': '',
'deploy_stderr': 'Error: image library/xxx:latest not found\n'
'Error: image library/xxx:latest not found',
'deploy_status_code': 1
}, json.loads(stdout))
state_0 = self.json_from_file(self.test_state_path)
state_1 = self.json_from_file('%s_1' % self.test_state_path)
self.assertEqual([
self.fake_tool_path,
'run',
'--detach=true',
'--name',
'abcdef001__web',
'xxx'
], state_0['args'])
self.assertEqual([
self.fake_tool_path,
'run',
'--detach=true',
'--name',
'abcdef001__db',
'--env=KOLLA_CONFIG_STRATEGY=COPY_ALWAYS',
'--env=FOO=BAR',
'--net=host',
'--privileged=true',
'--restart=always',
'--user=root',
'--volume=/run:/run',
'--volume=db:/var/lib/db',
'xxx'
], state_1['args'])
def test_cleanup_deleted(self):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(json.dumps([self.data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the first run, abcdef001.json is written out, no docker calls made
configs_path = os.path.join(self.env['HEAT_DOCKER_CMD_WORKING'],
'abcdef001.json')
self.assertThat(configs_path, matchers.FileExists())
self.assertThat(self.test_state_path,
matchers.Not(matchers.FileExists()))
# run again with empty config data
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(json.dumps([]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the second run, abcdef001.json is deleted, docker rm is run on
# both containers
configs_path = os.path.join(self.env['HEAT_DOCKER_CMD_WORKING'],
'abcdef001.json')
self.assertThat(configs_path,
matchers.Not(matchers.FileExists()))
state_0 = self.json_from_file(self.test_state_path)
state_1 = self.json_from_file('%s_1' % self.test_state_path)
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'abcdef001__web',
], state_0['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'abcdef001__db',
], state_1['args'])
def test_cleanup_changed(self):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(json.dumps([self.data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the first run, abcdef001.json is written out, no docker calls made
configs_path = os.path.join(self.env['HEAT_DOCKER_CMD_WORKING'],
'abcdef001.json')
self.assertThat(configs_path, matchers.FileExists())
self.assertThat(self.test_state_path,
matchers.Not(matchers.FileExists()))
# run again with changed config data
new_data = copy.deepcopy(self.data)
new_data['config']['web']['image'] = 'yyy'
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(json.dumps([new_data]))
f.flush()
self.env['HEAT_SHELL_CONFIG'] = f.name
returncode, stdout, stderr = self.run_cmd(
[self.cleanup_path], self.env)
# on the second run, abcdef001.json is written with the new data,
# docker rm is run on both containers
configs_path = os.path.join(self.env['HEAT_DOCKER_CMD_WORKING'],
'abcdef001.json')
self.assertThat(configs_path, matchers.FileExists())
state_0 = self.json_from_file(self.test_state_path)
state_1 = self.json_from_file('%s_1' % self.test_state_path)
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'abcdef001__web',
], state_0['args'])
self.assertEqual([
self.fake_tool_path,
'rm',
'-f',
'abcdef001__db',
], state_1['args'])