Merge "Refactor undercloud deploy to use Ansible."

Zuul
2017-11-08 05:09:39 +00:00
committed by Gerrit Code Review
5 changed files with 313 additions and 593 deletions

View File

@@ -1,227 +0,0 @@
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
try:
import http.server as BaseHTTPServer # Python3
except ImportError:
import BaseHTTPServer # Python2
import datetime
import json
import logging
import os
from oslo_utils import timeutils
TOMORROW = (timeutils.utcnow() + datetime.timedelta(days=1)).isoformat()
VERSION_RESPONSE_GET = {
"version": {
"status": "stable",
"updated": datetime.datetime.utcnow().isoformat(),
"media-types": [{
"base": "application/json",
"type": "application/vnd.openstack.identity-v3+json"
}],
"id": "v3.6",
"links": [{
"href": "http://127.0.0.1:%(api_port)s/v3/",
"rel": "self"
}]
}
}
TOKEN_RESPONSE_POST = {
"token": {
"is_domain": False,
"methods": ["password"],
"roles": [{
"id": "4c8de39b96794ab28bf37a0b842b8bc8",
"name": "admin"
}],
"expires_at": TOMORROW,
"project": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "admin",
"name": "admin"
},
"catalog": [{
"endpoints": [{
"url": "http://127.0.0.1:%(heat_port)s/v1/admin",
"interface": "public",
"region": "regionOne",
"region_id": "regionOne",
"id": "2809305628004fb391b3d0254fb5b4f7"
}, {
"url": "http://127.0.0.1:%(heat_port)s/v1/admin",
"interface": "internal",
"region": "regionOne",
"region_id": "regionOne",
"id": "2809305628004fb391b3d0254fb5b4f7"
}, {
"url": "http://127.0.0.1:%(heat_port)s/v1/admin",
"interface": "admin",
"region": "regionOne",
"region_id": "regionOne",
"id": "2809305628004fb391b3d0254fb5b4f7"
}],
"type": "orchestration",
"id": "96a549e3961d45cabe883dd17c5835be",
"name": "heat"
}, {
"endpoints": [{
"url": "http://127.0.0.1:%(api_port)s/v3",
"interface": "public",
"region": "regionOne",
"region_id": "regionOne",
"id": "eca215878e404a2d9dcbcc7f6a027165"
}, {
"url": "http://127.0.0.1:%(api_port)s/v3",
"interface": "internal",
"region": "regionOne",
"region_id": "regionOne",
"id": "eca215878e404a2d9dcbcc7f6a027165"
}, {
"url": "http://127.0.0.1:%(api_port)s/v3",
"interface": "admin",
"region": "regionOne",
"region_id": "regionOne",
"id": "eca215878e404a2d9dcbcc7f6a027165"
}],
"type": "identity",
"id": "a785f0b7603042d1bf59237c71af2f15",
"name": "keystone"
}],
"user": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "8b7b4c094f934e8c83aa7fe12591dc6c",
"name": "admin"
},
"audit_ids": ["F6ONJ8fCT6i_CFTbmC0vBA"],
"issued_at": datetime.datetime.utcnow().isoformat()
}
}
STACK_USER_ROLE_GET = {
"links": {
"self": "http://127.0.0.1:%(api_port)s/v3/roles",
"previous": None,
"next": None
},
"roles": [{
"domain_id": None,
"id": "b123456",
"links": {
"self": "http://127.0.0.1:%(api_port)s/v3/roles/b123456"
},
"name": "heat_stack_user"
}]
}
STACK_USER_POST = {
"user": {
"name": "heat_stack_user",
"links": {
"self": "http://127.0.0.1:%(api_port)s/v3/users/c123456"
},
"domain_id": "default",
"enabled": True,
"email": "heat@localhost",
"id": "c123456"
}
}
AUTH_TOKEN_GET = {
"token": {
"issued_at": datetime.datetime.utcnow().isoformat(),
"audit_ids": ["PUrztDYYRBeq-C8CKr-kEw"],
"methods": ["password"],
"expires_at": TOMORROW,
"user": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "8b7b4c094f934e8c83aa7fe12591dc6c",
"name": "admin"
}
}
}
class FakeKeystone(BaseHTTPServer.BaseHTTPRequestHandler):
log = logging.getLogger(__name__ + ".FakeKeystone")
def _get_port_from_env(self):
return os.environ.get('FAKE_KEYSTONE_PORT', '35358')
def _get_heat_port_from_env(self):
return os.environ.get('HEAT_API_PORT', '8006')
def _format(self, my_json):
return (json.dumps(my_json) % {'api_port': self._get_port_from_env(),
'heat_port': self._get_heat_port_from_env()})
def _send_headers(self, code=200):
self.send_response(code)
self.send_header('Content-type', 'application/json')
self.send_header('X-Auth-User', 'admin')
self.send_header('X-Subject-Token', '123456789')
self.end_headers()
def do_GET(self):
if self.path in ['/', '/v3', '/v3/']:
self._send_headers(300)
self.wfile.write(self._format(VERSION_RESPONSE_GET))
elif self.path == '/v3/auth/tokens':
self._send_headers(200)
self.wfile.write(self._format(AUTH_TOKEN_GET))
elif self.path.startswith('/v3/roles?name=heat_stack_user'):
self._send_headers()
self.wfile.write(self._format(STACK_USER_ROLE_GET))
else:
raise Exception('Not Implemented: %s' % self.path)
def do_POST(self):
if self.path == '/v3/auth/tokens':
self._send_headers(201)
self.wfile.write(self._format(TOKEN_RESPONSE_POST))
elif self.path == '/v3/users':
self._send_headers()
self.wfile.write(self._format(STACK_USER_POST))
else:
raise Exception('Not Implemented: %s' % self.path)
def do_PUT(self):
if self.path.startswith('/v3/projects/admin/users/'):
self._send_headers()
pass # NOTE: 200 response is good enough here
else:
raise Exception('Not Implemented: %s' % self.path)
def log_message(self, format, *args):
return
def launch():
port = os.environ.get('FAKE_KEYSTONE_PORT', '35358')
httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', int(port)), FakeKeystone)
httpd.serve_forever()
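The module above is the throw-away Keystone shim that this merge deletes: it served canned version, token and role responses so the local Heat instance had something to authenticate against. For context, a minimal sketch of how it was exercised; running it in a thread and poking it with requests are assumptions made only for this illustration (the deploy command actually forked it as the 'nobody' user via _fork_fake_keystone, shown further down in this diff):

# Illustrative usage of the removed shim (assumed; not part of the change).
import os
import threading

import requests  # assumption: used here only to poke the fake endpoint

from tripleoclient import fake_keystone  # module removed by this commit

os.environ.setdefault('FAKE_KEYSTONE_PORT', '35358')
os.environ.setdefault('HEAT_API_PORT', '8006')

# launch() blocks in serve_forever(), so run it in a daemon thread here.
threading.Thread(target=fake_keystone.launch, daemon=True).start()

resp = requests.post('http://127.0.0.1:35358/v3/auth/tokens', json={})
# Expected (under Python 2, which this shim targeted): a 201 response with a
# static X-Subject-Token of '123456789' and TOKEN_RESPONSE_POST as the body.
print(resp.status_code, resp.headers.get('X-Subject-Token'))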

View File

@@ -14,23 +14,104 @@
#
from __future__ import print_function
+ import datetime
+ import json
import logging
import os
import signal
import subprocess
import tempfile
+ from oslo_utils import timeutils
log = logging.getLogger(__name__)
NEXT_DAY = (timeutils.utcnow() + datetime.timedelta(days=2)).isoformat()
FAKE_TOKEN_RESPONSE = {
"token": {
"is_domain": False,
"methods": ["password"],
"roles": [{
"id": "4c8de39b96794ab28bf37a0b842b8bc8",
"name": "admin"
}],
"expires_at": NEXT_DAY,
"project": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "admin",
"name": "admin"
},
"catalog": [{
"endpoints": [{
"url": "http://127.0.0.1:%(heat_port)s/v1/admin",
"interface": "public",
"region": "regionOne",
"region_id": "regionOne",
"id": "2809305628004fb391b3d0254fb5b4f7"
}, {
"url": "http://127.0.0.1:%(heat_port)s/v1/admin",
"interface": "internal",
"region": "regionOne",
"region_id": "regionOne",
"id": "2809305628004fb391b3d0254fb5b4f7"
}, {
"url": "http://127.0.0.1:%(heat_port)s/v1/admin",
"interface": "admin",
"region": "regionOne",
"region_id": "regionOne",
"id": "2809305628004fb391b3d0254fb5b4f7"
}],
"type": "orchestration",
"id": "96a549e3961d45cabe883dd17c5835be",
"name": "heat"
}, {
"endpoints": [{
"url": "http://127.0.0.1/v3",
"interface": "public",
"region": "regionOne",
"region_id": "regionOne",
"id": "eca215878e404a2d9dcbcc7f6a027165"
}, {
"url": "http://127.0.0.1/v3",
"interface": "internal",
"region": "regionOne",
"region_id": "regionOne",
"id": "eca215878e404a2d9dcbcc7f6a027165"
}, {
"url": "http://127.0.0.1/v3",
"interface": "admin",
"region": "regionOne",
"region_id": "regionOne",
"id": "eca215878e404a2d9dcbcc7f6a027165"
}],
"type": "identity",
"id": "a785f0b7603042d1bf59237c71af2f15",
"name": "keystone"
}],
"user": {
"domain": {
"id": "default",
"name": "Default"
},
"id": "8b7b4c094f934e8c83aa7fe12591dc6c",
"name": "admin"
},
"audit_ids": ["F6ONJ8fCT6i_CFTbmC0vBA"],
"issued_at": datetime.datetime.utcnow().isoformat()
}
}
class HeatBaseLauncher(object):
# The init function will need permission to touch these files
# and chown them accordingly for the heat user
- def __init__(self, api_port, ks_port, container_image, user='heat'):
+ def __init__(self, api_port, container_image, user='heat'):
self.api_port = api_port
- self.ks_port = ks_port
self.policy_file = os.path.join(os.path.dirname(__file__),
'noauth_policy.json')
@@ -40,19 +121,21 @@ class HeatBaseLauncher(object):
self.sql_db = os.path.join(self.install_tmp, 'heat.sqlite')
self.log_file = os.path.join(self.install_tmp, 'heat.log')
self.config_file = os.path.join(self.install_tmp, 'heat.conf')
+ self.token_file = os.path.join(self.install_tmp, 'token_file.json')
+ self._write_fake_keystone_token(api_port, self.token_file)
self._write_heat_config(self.config_file,
self.sql_db,
self.log_file,
api_port,
- ks_port,
- self.policy_file)
+ self.policy_file,
+ self.token_file)
uid = int(self.get_heat_uid())
gid = int(self.get_heat_gid())
os.chown(self.install_tmp, uid, gid)
os.chown(self.config_file, uid, gid)
def _write_heat_config(self, config_file, sqlite_db, log_file, api_port,
- ks_port, policy_file):
+ policy_file, token_file):
heat_config = '''
[DEFAULT]
log_file = %(log_file)s
@@ -63,9 +146,14 @@ deferred_auth_method = password
num_engine_workers=1
convergence_engine = false
max_json_body_size = 8388608
+ heat_metadata_server_url=http://127.0.0.1:%(api_port)s/
default_deployment_signal_transport = HEAT_SIGNAL
max_nested_stack_depth = 6
+ keystone_backend = heat.engine.clients.os.keystone.fake_keystoneclient\
+ .FakeKeystoneClient
+ [noauth]
+ token_response = %(token_file)s
[heat_all]
enabled_services = api,engine
@@ -85,28 +173,27 @@ api_paste_config = /usr/share/heat/api-paste-dist.ini
[oslo_policy]
policy_file = %(policy_file)s
- [clients_keystone]
- auth_uri=http://127.0.0.1:%(ks_port)s
- [keystone_authtoken]
- auth_type = password
- auth_url=http://127.0.0.1:%(ks_port)s
[yaql]
memory_quota=900000
limit_iterators=9000
''' % {'sqlite_db': sqlite_db, 'log_file': log_file,
- 'api_port': api_port, 'ks_port': ks_port,
- 'policy_file': policy_file}
+ 'api_port': api_port, 'policy_file': policy_file,
+ 'token_file': token_file}
with open(config_file, 'w') as temp_file:
temp_file.write(heat_config)
+ def _write_fake_keystone_token(self, heat_api_port, config_file):
+ ks_token = json.dumps(FAKE_TOKEN_RESPONSE) % {'heat_port':
+ heat_api_port}
+ with open(config_file, 'w') as temp_file:
+ temp_file.write(ks_token)
class HeatDockerLauncher(HeatBaseLauncher):
- def __init__(self, api_port, ks_port, container_image, user='heat'):
- super(HeatDockerLauncher, self).__init__(api_port, ks_port,
- container_image, user)
+ def __init__(self, api_port, container_image, user='heat'):
+ super(HeatDockerLauncher, self).__init__(api_port, container_image,
+ user)
def launch_heat(self):
cmd = [
@@ -175,9 +262,9 @@ class HeatDockerLauncher(HeatBaseLauncher):
class HeatNativeLauncher(HeatBaseLauncher):
- def __init__(self, api_port, ks_port, container_image, user='heat'):
- super(HeatNativeLauncher, self).__init__(api_port, ks_port,
- container_image, user)
+ def __init__(self, api_port, container_image, user='heat'):
+ super(HeatNativeLauncher, self).__init__(api_port, container_image,
+ user)
def launch_heat(self):
os.execvp('heat-all', ['heat-all', '--config-file', self.config_file])

View File

@@ -167,7 +167,7 @@ class ClientWrapper(object):
self._object_store = None
self._local_orchestration = None
- def local_orchestration(self, api_port, keystone_port):
+ def local_orchestration(self, api_port):
"""Returns an local_orchestration service client"""
if self._local_orchestration is not None:
@@ -185,7 +185,6 @@ class ClientWrapper(object):
client = heat_client(
endpoint='http://127.0.0.1:%s/v1/admin' % api_port,
- auth_url='http://127.0.0.1:%s/v3' % keystone_port,
username='admin',
password='fake',
region_name='regionOne',
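Net effect of this hunk: the installer's private orchestration client no longer needs a Keystone URL. A hedged sketch of the resulting construction (the heatclient import path is an assumption; the endpoint and credentials are the ones shown above):

# Assumed illustration of the trimmed local_orchestration() construction.
from heatclient.v1 import client as heat_client

def local_orchestration(api_port):
    # Talk straight to the local noauth Heat API; 'admin'/'fake' are the
    # placeholder credentials kept by this change -- no auth_url required.
    return heat_client.Client(
        endpoint='http://127.0.0.1:%s/v1/admin' % api_port,
        username='admin',
        password='fake',
        region_name='regionOne')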

View File

@@ -15,7 +15,6 @@
import mock
import os
- import subprocess
from tripleoclient.tests.v1.test_plugin import TestPluginV1
@@ -86,37 +85,3 @@ class TestUndercloudDeploy(TestPluginV1):
mock_dump.assert_called_once_with(expected_dict,
mock_open_handle,
default_flow_style=False)
@mock.patch('subprocess.check_call', autospec=True)
def test_install_prerequisites(self, mock_check_call):
mock_check_call.side_effect = [
True, True,
subprocess.CalledProcessError(1, ''), True]
arglist = []
verifylist = []
self.check_parser(self.cmd, arglist, verifylist)
self.cmd._install_prerequisites(False)
mock_check_call.assert_has_calls([
mock.call(['rpm', '-q', 'foo']),
mock.call(['rpm', '-q', 'bar']),
mock.call(['rpm', '-q', 'baz']),
mock.call(['yum', '-y', 'install', 'baz'])
])
@mock.patch('subprocess.check_call', autospec=True)
def test_fail_prerequisites(self, mock_check_call):
mock_check_call.side_effect = [
True, subprocess.CalledProcessError(127, ''), True, True]
arglist = []
verifylist = []
self.check_parser(self.cmd, arglist, verifylist)
try:
self.cmd._install_prerequisites(False)
except Exception as e:
self.assertTrue('Failed to check for prerequisites: '
'bar, the exit status 127' in str(e))
mock_check_call.assert_has_calls([
mock.call(['rpm', '-q', 'foo']),
mock.call(['rpm', '-q', 'bar'])])

View File

@@ -15,16 +15,16 @@
from __future__ import print_function
import argparse
- import itertools
+ import glob
import logging
import netaddr
import os
import pwd
- import signal
import subprocess
import sys
import tempfile
import time
+ import traceback
import yaml
try:
@@ -40,41 +40,28 @@ except ImportError:
from cliff import command
from heatclient.common import event_utils
from heatclient.common import template_utils
+ from heatclient.common import utils as heat_utils
from openstackclient.i18n import _
from tripleoclient import constants
from tripleoclient import exceptions
- from tripleoclient import fake_keystone
from tripleoclient import heat_launcher
from tripleo_common.utils import passwords as password_utils
- REQUIRED_PACKAGES = iter([
- 'python-heat-agent',
- 'python-heat-agent-apply-config',
- 'python-heat-agent-hiera',
- 'python-heat-agent-puppet',
- 'python-heat-agent-docker-cmd',
- 'python-heat-agent-json-file',
- 'python-heat-agent-ansible',
- 'python-ipaddr',
- 'python-tripleoclient',
- 'docker',
- 'openvswitch',
- 'puppet-tripleo',
- 'yum-plugin-priorities',
- 'openstack-tripleo-common',
- 'openstack-tripleo-heat-templates',
- 'deltarpm'
- ])
- INSTALLER_ENV = {
- 'OS_AUTH_URL': 'http://127.0.0.1:35358',
- 'OS_USERNAME': 'foo',
- 'OS_PROJECT_NAME': 'foo',
- 'OS_PASSWORD': 'bar'
- }
+ # For ansible download
+ from tripleo_common.utils import config
+ ANSIBLE_INVENTORY = """
+ [targets]
+ overcloud ansible_connection=local
+
+ [Undercloud]
+ overcloud
+
+ [{hostname}]
+ overcloud
+ """
class DeployUndercloud(command.Command):
@@ -82,37 +69,12 @@ class DeployUndercloud(command.Command):
log = logging.getLogger(__name__ + ".DeployUndercloud")
auth_required = False
- prerequisites = REQUIRED_PACKAGES
+ heat_pid = None
def _get_hostname(self):
p = subprocess.Popen(["hostname", "-s"], stdout=subprocess.PIPE)
return p.communicate()[0].rstrip()
def _install_prerequisites(self, install_heat_native):
print('Checking for installed prerequisites ...')
processed = []
if install_heat_native:
self.prerequisites = itertools.chain(
self.prerequisites,
['openstack-heat-api', 'openstack-heat-engine',
'openstack-heat-monolith'])
for p in self.prerequisites:
try:
subprocess.check_call(['rpm', '-q', p])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
processed.append(p)
elif e.returncode != 0:
raise Exception('Failed to check for prerequisites: '
'%s, the exit status %s'
% (p, e.returncode))
if len(processed) > 0:
print('Installing prerequisites ...')
subprocess.check_call(['yum', '-y', 'install'] + processed)
def _configure_puppet(self):
print('Configuring puppet modules symlinks ...')
src = constants.TRIPLEO_PUPPET_MODULES
@@ -126,29 +88,6 @@ class DeployUndercloud(command.Command):
os.rename(tmpf, os.path.join(dst, obj))
os.rmdir(tmp)
def _lookup_tripleo_server_stackid(self, client, stack_id):
server_stack_id = None
for X in client.resources.list(stack_id, nested_depth=6):
if X.resource_type in (
'OS::TripleO::Server',
'OS::TripleO::UndercloudServer'):
server_stack_id = X.physical_resource_id
return server_stack_id
def _launch_os_collect_config(self, keystone_port, stack_id):
print('Launching os-collect-config ...')
os.execvp('os-collect-config',
['os-collect-config',
'--polling-interval', '3',
'--heat-auth-url', 'http://127.0.0.1:%s/v3' % keystone_port,
'--heat-password', 'fake',
'--heat-user-id', 'admin',
'--heat-project-id', 'admin',
'--heat-stack-id', stack_id,
'--heat-resource-name', 'deployed-server', 'heat'])
def _wait_local_port_ready(self, api_port):
count = 0
while count < 30:
@@ -164,135 +103,6 @@ class DeployUndercloud(command.Command):
pass
return False
def _heat_deploy(self, stack_name, template_path, parameters,
environments, timeout, api_port, ks_port):
self.log.debug("Processing environment files")
env_files, env = (
template_utils.process_multiple_environments_and_files(
environments))
self.log.debug("Getting template contents")
template_files, template = template_utils.get_template_contents(
template_path)
files = dict(list(template_files.items()) + list(env_files.items()))
# NOTE(dprince): we use our own client here because we set
# auth_required=False above because keystone isn't running when this
# command starts
tripleoclients = self.app.client_manager.tripleoclient
orchestration_client = tripleoclients.local_orchestration(api_port,
ks_port)
self.log.debug("Deploying stack: %s", stack_name)
self.log.debug("Deploying template: %s", template)
self.log.debug("Deploying parameters: %s", parameters)
self.log.debug("Deploying environment: %s", env)
self.log.debug("Deploying files: %s", files)
stack_args = {
'stack_name': stack_name,
'template': template,
'environment': env,
'files': files,
}
if timeout:
stack_args['timeout_mins'] = timeout
self.log.info("Performing Heat stack create")
stack = orchestration_client.stacks.create(**stack_args)
stack_id = stack['stack']['id']
event_list_pid = self._fork_heat_event_list()
self.log.info("Looking up server stack id...")
server_stack_id = None
# NOTE(dprince) wait a bit to create the server_stack_id resource
for c in range(timeout * 60):
time.sleep(1)
server_stack_id = self._lookup_tripleo_server_stackid(
orchestration_client, stack_id)
status = orchestration_client.stacks.get(stack_id).status
if status == 'FAILED':
event_utils.poll_for_events(orchestration_client, stack_name)
msg = ('Stack failed before deployed-server resource '
'created.')
raise Exception(msg)
if server_stack_id:
break
if not server_stack_id:
msg = ('Unable to find deployed server stack id. '
'See tripleo-heat-templates to ensure proper '
'"deployed-server" usage.')
raise Exception(msg)
self.log.debug("server_stack_id: %s" % server_stack_id)
pid = None
status = 'FAILED'
try:
pid = os.fork()
if pid == 0:
self._launch_os_collect_config(ks_port, server_stack_id)
else:
while True:
status = orchestration_client.stacks.get(stack_id).status
self.log.info(status)
if status in ['COMPLETE', 'FAILED']:
break
time.sleep(5)
finally:
if pid:
os.kill(pid, signal.SIGKILL)
if event_list_pid:
os.kill(event_list_pid, signal.SIGKILL)
stack_get = orchestration_client.stacks.get(stack_id)
status = stack_get.status
if status != 'FAILED':
pw_rsrc = orchestration_client.resources.get(
stack_id, 'DefaultPasswords')
passwords = {p.title().replace("_", ""): v for p, v in
pw_rsrc.attributes.get('passwords', {}).items()}
return passwords
else:
msg = "Stack create failed, reason: %s" % stack_get.reason
raise Exception(msg)
def _fork_heat_event_list(self):
pid = os.fork()
if pid == 0:
try:
os.setpgrp()
os.setgid(pwd.getpwnam('nobody').pw_gid)
os.setuid(pwd.getpwnam('nobody').pw_uid)
except KeyError:
raise exceptions.DeploymentError(
"Please create a 'nobody' user account before "
"proceeding.")
subprocess.check_call(['openstack', 'stack', 'event', 'list',
'undercloud', '--follow',
'--nested-depth', '6'], env=INSTALLER_ENV)
sys.exit(0)
else:
return pid
def _fork_fake_keystone(self):
pid = os.fork()
if pid == 0:
try:
os.setpgrp()
os.setgid(pwd.getpwnam('nobody').pw_gid)
os.setuid(pwd.getpwnam('nobody').pw_uid)
except KeyError:
raise exceptions.DeploymentError(
"Please create a 'nobody' user account before "
"proceeding.")
fake_keystone.launch()
sys.exit(0)
else:
return pid
def _update_passwords_env(self, passwords=None):
pw_file = os.path.join(os.environ.get('HOME', ''),
'tripleo-undercloud-passwords.yaml')
@@ -351,10 +161,57 @@ class DeployUndercloud(command.Command):
}
return data
- def _deploy_tripleo_heat_templates(self, parsed_args):
- """Deploy the fixed templates in TripleO Heat Templates"""
- parameters = {}
+ def _kill_heat(self):
+ if self.heat_pid:
+ self.heat_launch.kill_heat(self.heat_pid)
pid, ret = os.waitpid(self.heat_pid, 0)
self.heat_pid = None
def _launch_heat(self, parsed_args):
# we do this as root to chown config files properly for docker, etc.
if parsed_args.heat_native:
self.heat_launch = heat_launcher.HeatNativeLauncher(
parsed_args.heat_api_port,
parsed_args.heat_container_image,
parsed_args.heat_user)
else:
self.heat_launch = heat_launcher.HeatDockerLauncher(
parsed_args.heat_api_port,
parsed_args.heat_container_image,
parsed_args.heat_user)
# NOTE(dprince): we launch heat with fork exec because
# we don't want it to inherit our args. Launching heat
# as a "library" would be cool... but that would require
# more refactoring. It runs a single process and we kill
# it always below.
self.heat_pid = os.fork()
if self.heat_pid == 0:
if parsed_args.heat_native:
try:
uid = pwd.getpwnam(parsed_args.heat_user).pw_uid
gid = pwd.getpwnam(parsed_args.heat_user).pw_gid
except KeyError:
raise exceptions.DeploymentError(
"Please create a %s user account before "
"proceeding." % parsed_args.heat_user)
os.setgid(gid)
os.setuid(uid)
self.heat_launch.heat_db_sync()
# Exec() never returns.
self.heat_launch.launch_heat()
# NOTE(dprince): we use our own client here because we set
# auth_required=False above because keystone isn't running when this
# command starts
tripleoclients = self.app.client_manager.tripleoclient
orchestration_client = \
tripleoclients.local_orchestration(parsed_args.heat_api_port)
return orchestration_client
def _setup_heat_environments(self, parsed_args):
tht_root = parsed_args.templates
# generate jinja templates
args = ['python', 'tools/process-templates.py', '--roles-data',
@@ -379,6 +236,12 @@ class DeployUndercloud(command.Command):
tht_root, 'environments', 'undercloud.yaml')
environments.append(undercloud_env_path)
# use deployed-server because we run os-collect-config locally
deployed_server_env = os.path.join(
tht_root, 'environments',
'config-download-environment.yaml')
environments.append(deployed_server_env)
# use deployed-server because we run os-collect-config locally # use deployed-server because we run os-collect-config locally
deployed_server_env = os.path.join(
tht_root, 'environments',
@@ -388,7 +251,7 @@ class DeployUndercloud(command.Command):
if parsed_args.environment_files:
environments.extend(parsed_args.environment_files)
- with tempfile.NamedTemporaryFile() as tmp_env_file:
+ with tempfile.NamedTemporaryFile(delete=False) as tmp_env_file:
tmp_env = self._generate_hosts_parameters()
ip_nw = netaddr.IPNetwork(parsed_args.local_ip)
@@ -401,26 +264,112 @@ class DeployUndercloud(command.Command):
default_flow_style=False)
environments.append(tmp_env_file.name)
- undercloud_yaml = os.path.join(tht_root, 'overcloud.yaml')
- passwords = self._heat_deploy(parsed_args.stack, undercloud_yaml,
- parameters, environments,
- parsed_args.timeout,
- parsed_args.heat_api_port,
- parsed_args.fake_keystone_port)
- if passwords:
- # Get legacy passwords/secrets generated via heat
- # These need to be written to the passwords file
- # to avoid re-creating them every update
- self._update_passwords_env(passwords)
- return True
- def _write_credentials(self):
- fn = os.path.expanduser('~/installer_stackrc')
- with os.fdopen(os.open(fn, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
- f.write('# credentials to use while the undercloud '
- 'installer is running')
- for k, v in INSTALLER_ENV.items():
- f.write('export %s=%s\n' % (k, v))
+ return environments
+ def _deploy_tripleo_heat_templates(self, orchestration_client,
+ parsed_args):
+ """Deploy the fixed templates in TripleO Heat Templates"""
+ environments = self._setup_heat_environments(parsed_args)
+ self.log.debug("Processing environment files")
env_files, env = (
template_utils.process_multiple_environments_and_files(
environments))
self.log.debug("Getting template contents")
template_path = os.path.join(parsed_args.templates, 'overcloud.yaml')
template_files, template = \
template_utils.get_template_contents(template_path)
files = dict(list(template_files.items()) + list(env_files.items()))
stack_name = parsed_args.stack
self.log.debug("Deploying stack: %s", stack_name)
self.log.debug("Deploying template: %s", template)
self.log.debug("Deploying environment: %s", env)
self.log.debug("Deploying files: %s", files)
stack_args = {
'stack_name': stack_name,
'template': template,
'environment': env,
'files': files,
}
if parsed_args.timeout:
stack_args['timeout_mins'] = parsed_args.timeout
self.log.info("Performing Heat stack create")
stack = orchestration_client.stacks.create(**stack_args)
stack_id = stack['stack']['id']
return stack_id
def _wait_for_heat_complete(self, orchestration_client, stack_id, timeout):
# Wait for the stack to go to COMPLETE.
timeout_t = time.time() + 60 * timeout
marker = None
event_log_context = heat_utils.EventLogContext()
kwargs = {
'sort_dir': 'asc',
'nested_depth': '6'
}
while True:
time.sleep(2)
events = event_utils.get_events(
orchestration_client,
stack_id=stack_id,
event_args=kwargs,
marker=marker)
if events:
marker = getattr(events[-1], 'id', None)
events_log = heat_utils.event_log_formatter(
events, event_log_context)
print(events_log)
status = orchestration_client.stacks.get(stack_id).status
if status == 'FAILED':
raise Exception('Stack create failed')
if status == 'COMPLETE':
break
if time.time() > timeout_t:
msg = 'Stack creation timeout: %d minutes elapsed' % (timeout)
raise Exception(msg)
def _download_ansible_playbooks(self, client):
stack_config = config.Config(client)
output_dir = os.environ.get('HOME')
print('** Downloading undercloud ansible.. **')
# python output buffering is making this seem to take forever..
sys.stdout.flush()
stack_config.download_config('undercloud', output_dir)
# Sadly the above writes the ansible config to a new directory each
# time. This finds the newest new entry.
ansible_dir = max(glob.iglob('%s/tripleo-*-config' % output_dir),
key=os.path.getctime)
# Write out the inventory file.
with open('%s/inventory' % ansible_dir, 'w') as f:
f.write(ANSIBLE_INVENTORY.format(hostname=self._get_hostname()))
print('** Downloaded undercloud ansible to %s **' % ansible_dir)
sys.stdout.flush()
return ansible_dir
# Never returns, calls exec()
def _launch_ansible(self, ansible_dir):
os.chdir(ansible_dir)
playbook_inventory = "%s/inventory" % (ansible_dir)
cmd = ['ansible-playbook', '-i', playbook_inventory,
'deploy_steps_playbook.yaml', '-e', 'role_name=Undercloud',
'-e', 'deploy_server_id=undercloud', '-e',
'bootstrap_server_id=undercloud']
print('Running Ansible: %s' % (' '.join(cmd)))
# execvp() doesn't return.
os.execvp(cmd[0], cmd)
def get_parser(self, prog_name):
parser = argparse.ArgumentParser(
@@ -452,13 +401,6 @@ class DeployUndercloud(command.Command):
help=_('Heat API port to use for the installers private'
' Heat API instance. Optional. Default: 8006.)')
)
parser.add_argument(
'--fake-keystone-port', metavar='<FAKE_KEYSTONE_PORT>',
dest='fake_keystone_port',
default='35358',
help=_('Keystone API port to use for the installers private'
' fake Keystone API instance. Optional. Default: 35358.)')
)
parser.add_argument(
'--heat-user', metavar='<HEAT_USER>',
dest='heat_user',
@@ -509,13 +451,6 @@ class DeployUndercloud(command.Command):
'for this machine.')
return
# NOTE(dprince): It would be nice if heat supported true 'noauth'
# use in a local format for our use case here (or perhaps dev testing)
# but until it does running our own lightweight shim to mock out
# the required API calls works just as well. To keep fake keystone
# light we run it in a thread.
if not os.environ.get('FAKE_KEYSTONE_PORT'):
os.environ['FAKE_KEYSTONE_PORT'] = parsed_args.fake_keystone_port
if not os.environ.get('HEAT_API_PORT'):
os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port
@@ -525,73 +460,34 @@ class DeployUndercloud(command.Command):
if os.geteuid() != 0:
raise exceptions.DeploymentError("Please run as root.")
- # Install required packages and configure puppet
- self._install_prerequisites(parsed_args.heat_native)
+ # configure puppet
self._configure_puppet()
- keystone_pid = self._fork_fake_keystone()
- # we do this as root to chown config files properly for docker, etc.
- if parsed_args.heat_native:
- heat_launch = heat_launcher.HeatNativeLauncher(
- parsed_args.heat_api_port,
- parsed_args.fake_keystone_port,
- parsed_args.heat_container_image,
- parsed_args.heat_user)
- else:
- heat_launch = heat_launcher.HeatDockerLauncher(
- parsed_args.heat_api_port,
- parsed_args.fake_keystone_port,
- parsed_args.heat_container_image,
- parsed_args.heat_user)
- heat_pid = None
try:
- # NOTE(dprince): we launch heat with fork exec because
- # we don't want it to inherit our args. Launching heat
- # as a "library" would be cool... but that would require
- # more refactoring. It runs a single process and we kill
- # it always below.
- heat_pid = os.fork()
- if heat_pid == 0:
- os.setpgrp()
- if parsed_args.heat_native:
- try:
- uid = pwd.getpwnam(parsed_args.heat_user).pw_uid
- gid = pwd.getpwnam(parsed_args.heat_user).pw_gid
- except KeyError:
- raise exceptions.DeploymentError(
- "Please create a %s user account before "
- "proceeding." % parsed_args.heat_user)
- os.setgid(gid)
- os.setuid(uid)
- heat_launch.heat_db_sync()
- heat_launch.launch_heat()
- else:
- heat_launch.heat_db_sync()
- heat_launch.launch_heat()
- else:
- self._wait_local_port_ready(parsed_args.fake_keystone_port)
- self._wait_local_port_ready(parsed_args.heat_api_port)
- self._write_credentials()
- if self._deploy_tripleo_heat_templates(parsed_args):
- print("\nDeploy Successful.")
- else:
- print("\nUndercloud deployment failed: "
- "press ctrl-c to exit.")
- while parsed_args.keep_running:
- try:
- time.sleep(1)
- except KeyboardInterrupt:
- break
- raise exceptions.DeploymentError("Stack create failed.")
+ # Launch heat.
+ orchestration_client = self._launch_heat(parsed_args)
+ # Wait for heat to be ready.
+ self._wait_local_port_ready(parsed_args.heat_api_port)
+ # Deploy TripleO Heat templates.
+ stack_id = \
+ self._deploy_tripleo_heat_templates(orchestration_client,
+ parsed_args)
+ # Wait for complete..
+ self._wait_for_heat_complete(orchestration_client, stack_id,
+ parsed_args.timeout)
+ # download the ansible playbooks and execute them.
+ ansible_dir = \
+ self._download_ansible_playbooks(orchestration_client)
+ # Kill heat, we're done with it now.
+ self._kill_heat()
+ # Never returns.. We exec() it directly.
+ self._launch_ansible(ansible_dir)
+ except Exception as e:
+ print("Exception: %s" % e)
+ print(traceback.format_exception(*sys.exc_info()))
+ raise
finally:
- if heat_launch and heat_pid != 0:
- print('Log files at: %s' % heat_launch.install_tmp)
- heat_launch.kill_heat(heat_pid)
- if keystone_pid:
- os.kill(keystone_pid, signal.SIGKILL)
+ # We only get here on error.
+ print('ERROR: Heat log files: %s' % (self.heat_launch.install_tmp))
+ self._kill_heat()
+ return 1
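After this refactor the tail of the deployment is an ansible-playbook run against the config downloaded by _download_ansible_playbooks (a tripleo-*-config directory under $HOME with a generated inventory file). A hedged manual equivalent of what _launch_ansible exec()s, useful when re-running the playbooks by hand:

# Hypothetical manual re-run of the downloaded playbooks (assumed workflow;
# the directory pattern and -e arguments mirror the code above).
import glob
import os
import subprocess

ansible_dir = max(glob.iglob(os.path.expanduser('~/tripleo-*-config')),
                  key=os.path.getctime)

subprocess.check_call(
    ['ansible-playbook', '-i', os.path.join(ansible_dir, 'inventory'),
     'deploy_steps_playbook.yaml',
     '-e', 'role_name=Undercloud',
     '-e', 'deploy_server_id=undercloud',
     '-e', 'bootstrap_server_id=undercloud'],
    cwd=ansible_dir)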