Implement minor update workflow with config download
This change refactors the way the minor update is performed: the Ansible playbooks are now obtained through config-download. It depends on a Mistral change which gathers the Ansible update_tasks and runs them via Mistral.

The user has two ways to perform the minor update:
- the stack update command, which performs an automatic minor update (or updates only a given set of nodes)
- running it manually via Ansible on the undercloud

Closes-Bug: #1715557
Closes-Bug: #1723108
Change-Id: I4fcd443d975894a1da0286b19506d00682c5768c
This commit is contained in:
parent
9fb431463d
commit
8a7da9fe26
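A rough usage sketch of the two paths described in the commit message. The command and option names follow the parser changes in this diff; the stack name, registry file and node role are illustrative assumptions, not part of the change:

    # 1) One-time init of the minor update: generate the update heat config
    #    output and pass the container registry data for the plan
    #    (my-registry.yaml is an assumed file name).
    openstack overcloud update stack --stack overcloud --init-minor-update \
        --container-registry-file my-registry.yaml

    # 2a) Automatic path: let the Mistral workflow run the downloaded update
    #     playbook on a given set of nodes.
    openstack overcloud update stack --stack overcloud --nodes Compute

    # 2b) Manual path: download the config and run the playbooks with Ansible
    #     from the undercloud (see the config download sketch further down).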
@@ -93,7 +93,6 @@ openstack.tripleoclient.v1 =
    overcloud_role_list = tripleoclient.v1.overcloud_roles:RoleList
    overcloud_roles_generate = tripleoclient.v1.overcloud_roles:RolesGenerate
    overcloud_support_report_collect = tripleoclient.v1.overcloud_support:ReportExecute
    overcloud_update_clear_breakpoints = tripleoclient.v1.overcloud_update:ClearBreakpointsOvercloud
    overcloud_update_stack = tripleoclient.v1.overcloud_update:UpdateOvercloud
    overcloud_execute = tripleoclient.v1.overcloud_execute:RemoteExecute
    overcloud_generate_fencing = tripleoclient.v1.overcloud_parameters:GenerateFencingParameters
@@ -1,91 +0,0 @@
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock


FAKE_STACK = {
    'parameters': {
        'ControllerCount': 1,
        'ComputeCount': 1,
        'ObjectStorageCount': 0,
        'BlockStorageCount': 0,
        'CephStorageCount': 0,
    },
    'stack_name': 'overcloud',
    'stack_status': "CREATE_COMPLETE",
    'outputs': [
        {'output_key': 'RoleConfig',
         'output_value': {
             'foo_config': 'foo'}},
        {'output_key': 'RoleData',
         'output_value': {
             'FakeCompute': {
                 'config_settings': {'nova::compute::libvirt::services::'
                                     'libvirt_virt_type': 'qemu'},
                 'global_config_settings': {},
                 'logging_groups': ['root', 'neutron', 'nova'],
                 'logging_sources': [{'path': '/var/log/nova/nova-compute.log',
                                      'type': 'tail'}],
                 'monitoring_subscriptions': ['overcloud-nova-compute'],
                 'service_config_settings': {'horizon': {'neutron::'
                                                         'plugins': ['ovs']}
                                             },
                 'service_metadata_settings': None,
                 'service_names': ['nova_compute', 'fake_service'],
                 'step_config': ['include ::tripleo::profile::base::sshd',
                                 'include ::timezone'],
                 'upgrade_batch_tasks': [],
                 'upgrade_tasks': [{'name': 'Stop fake service',
                                    'service': 'name=fake state=stopped',
                                    'tags': 'step1',
                                    'when': 'existingcondition'},
                                   {'name': 'Stop nova-compute service',
                                    'service': 'name=openstack-nova-compute '
                                               'state=stopped',
                                    'tags': 'step1',
                                    'when': ['existing', 'list']}]
                 },
             'FakeController': {
                 'config_settings': {'tripleo::haproxy::user': 'admin'},
                 'global_config_settings': {},
                 'logging_groups': ['root', 'keystone', 'neutron'],
                 'logging_sources': [{'path': '/var/log/keystone/keystone.log',
                                      'type': 'tail'}],
                 'monitoring_subscriptions': ['overcloud-keystone'],
                 'service_config_settings': {'horizon': {'neutron::'
                                                         'plugins': ['ovs']}
                                             },
                 'service_metadata_settings': None,
                 'service_names': ['pacemaker', 'fake_service'],
                 'step_config': ['include ::tripleo::profile::base::sshd',
                                 'include ::timezone'],
                 'upgrade_batch_tasks': [],
                 'upgrade_tasks': [{'name': 'Stop fake service',
                                    'service': 'name=fake state=stopped',
                                    'tags': 'step1'}]}}}]}


def create_to_dict_mock(**kwargs):
    mock_with_to_dict = mock.Mock()
    mock_with_to_dict.configure_mock(**kwargs)
    mock_with_to_dict.to_dict.return_value = kwargs
    return mock_with_to_dict


def create_tht_stack(**kwargs):
    stack = FAKE_STACK.copy()
    stack.update(kwargs)
    return create_to_dict_mock(**stack)
@@ -10,15 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.

import fixtures
import mock
import os

from mock import call
from mock import patch
from osc_lib.tests import utils

from tripleoclient.tests.v1.overcloud_config import fakes
from tripleoclient.v1 import overcloud_config


@@ -32,138 +27,14 @@ class TestOvercloudConfig(utils.TestCommand):
        self.app.client_manager.orchestration = mock.Mock()
        self.workflow = self.app.client_manager.workflow_engine

    @patch.object(overcloud_config.DownloadConfig, '_mkdir')
    @patch.object(overcloud_config.DownloadConfig, '_open_file')
    @mock.patch('tempfile.mkdtemp', autospec=True)
    def test_overcloud_config_generate_config(self,
                                              mock_tmpdir,
                                              mock_open,
                                              mock_mkdir):
    @mock.patch('tripleo_common.utils.config.Config.download_config')
    def test_overcloud_download_config(self, mock_config):
        arglist = ['--name', 'overcloud', '--config-dir', '/tmp']
        verifylist = [
            ('name', 'overcloud'),
            ('config_dir', '/tmp')
        ]
        config_type_list = ['config_settings', 'global_config_settings',
                            'logging_sources', 'monitoring_subscriptions',
                            'service_config_settings',
                            'service_metadata_settings',
                            'service_names',
                            'upgrade_batch_tasks', 'upgrade_tasks']
        fake_role = [role for role in
                     fakes.FAKE_STACK['outputs'][1]['output_value']]

        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        clients = self.app.client_manager
        orchestration_client = clients.orchestration
        orchestration_client.stacks.get.return_value = fakes.create_tht_stack()
        mock_tmpdir.return_value = "/tmp/tht"
        self.cmd.take_action(parsed_args)
        expected_mkdir_calls = [call('/tmp/tht/%s' % r) for r in fake_role]
        mock_mkdir.assert_has_calls(expected_mkdir_calls, any_order=True)
        expected_calls = []
        for config in config_type_list:
            for role in fake_role:
                if config == 'step_config':
                    expected_calls += [call('/tmp/tht/%s/%s.pp' %
                                            (role, config))]
                else:
                    expected_calls += [call('/tmp/tht/%s/%s.yaml' %
                                            (role, config))]
        mock_open.assert_has_calls(expected_calls, any_order=True)

    @patch.object(overcloud_config.DownloadConfig, '_mkdir')
    @patch.object(overcloud_config.DownloadConfig, '_open_file')
    @mock.patch('tempfile.mkdtemp', autospec=True)
    def test_overcloud_config_one_config_type(self,
                                              mock_tmpdir,
                                              mock_open,
                                              mock_mkdir):

        arglist = ['--name', 'overcloud', '--config-dir', '/tmp',
                   '--config-type', ['config_settings']]
        verifylist = [
            ('name', 'overcloud'),
            ('config_dir', '/tmp'),
            ('config_type', ['config_settings'])
        ]
        expected_config_type = 'config_settings'
        fake_role = [role for role in
                     fakes.FAKE_STACK['outputs'][1]['output_value']]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        clients = self.app.client_manager
        orchestration_client = clients.orchestration
        orchestration_client.stacks.get.return_value = fakes.create_tht_stack()
        mock_tmpdir.return_value = "/tmp/tht"
        self.cmd.take_action(parsed_args)
        expected_mkdir_calls = [call('/tmp/tht/%s' % r) for r in fake_role]
        expected_calls = [call('/tmp/tht/%s/%s.yaml'
                               % (r, expected_config_type))
                          for r in fake_role]
        mock_mkdir.assert_has_calls(expected_mkdir_calls, any_order=True)
        mock_open.assert_has_calls(expected_calls, any_order=True)

    @mock.patch('os.mkdir')
    @mock.patch('six.moves.builtins.open')
    @mock.patch('tempfile.mkdtemp', autospec=True)
    def test_overcloud_config_wrong_config_type(self, mock_tmpdir,
                                                mock_open, mock_mkdir):

        arglist = [
            '--name', 'overcloud',
            '--config-dir',
            '/tmp', '--config-type', ['bad_config']]
        verifylist = [
            ('name', 'overcloud'),
            ('config_dir', '/tmp'),
            ('config_type', ['bad_config'])
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        clients = self.app.client_manager
        mock_tmpdir.return_value = "/tmp/tht"
        orchestration_client = clients.orchestration
        orchestration_client.stacks.get.return_value = fakes.create_tht_stack()
        self.assertRaises(
            KeyError,
            self.cmd.take_action, parsed_args)

    @mock.patch('tripleoclient.utils.get_role_data', autospec=True)
    def test_overcloud_config_upgrade_tasks(self, mock_get_role_data):

        clients = self.app.client_manager
        orchestration_client = clients.orchestration
        orchestration_client.stacks.get.return_value = fakes.create_tht_stack()
        self.tmp_dir = self.useFixture(fixtures.TempDir()).path
        fake_role = [role for role in
                     fakes.FAKE_STACK['outputs'][1]['output_value']]
        expected_tasks = {'FakeController': [{'name': 'Stop fake service',
                                              'service': 'name=fake '
                                                         'state=stopped',
                                              'tags': 'step1',
                                              'when': 'step|int == 1'}],
                          'FakeCompute': [{'name': 'Stop fake service',
                                           'service':
                                               'name=fake state=stopped',
                                           'tags': 'step1',
                                           'when': ['existingcondition',
                                                    'step|int == 1']},
                                          {'name': 'Stop nova-'
                                                   'compute service',
                                           'service':
                                               'name=openstack-nova-'
                                               'compute state=stopped',
                                           'tags': 'step1',
                                           'when': ['existing',
                                                    'list', 'step|int == 1']}]}
        mock_get_role_data.return_value = fake_role

        for role in fake_role:
            filedir = os.path.join(self.tmp_dir, role)
            os.makedirs(filedir)
            filepath = os.path.join(filedir, "upgrade_tasks_playbook.yaml")
            playbook_tasks = self.cmd._write_playbook_get_tasks(
                fakes.FAKE_STACK['outputs'][1]['output_value'][role]
                ['upgrade_tasks'], role, filepath)
            self.assertTrue(os.path.isfile(filepath))
            self.assertEqual(expected_tasks[role], playbook_tasks)
        mock_config.assert_called_once_with('overcloud', '/tmp', None)

@@ -14,6 +14,7 @@
#

import mock
import uuid

from tripleoclient import exceptions
from tripleoclient.tests.v1.overcloud_update import fakes
@@ -30,44 +31,89 @@ class TestOvercloudUpdate(fakes.TestOvercloudUpdate):
        app_args.verbose_level = 1
        self.cmd = overcloud_update.UpdateOvercloud(self.app, app_args)

        uuid4_patcher = mock.patch('uuid.uuid4', return_value="UUID4")
        self.mock_uuid4 = uuid4_patcher.start()
        self.addCleanup(self.mock_uuid4.stop)

    @mock.patch('tripleoclient.utils.get_stack',
                autospec=True)
    @mock.patch('tripleoclient.v1.overcloud_update.UpdateOvercloud.log',
                autospec=True)
    @mock.patch('tripleoclient.workflows.package_update.update_and_wait',
    @mock.patch('tripleoclient.workflows.package_update.update',
                autospec=True)
    def test_update_out(self, mock_update_wait, mock_logger, mock_get_stack):
        mock_update_wait.return_value = 'COMPLETE'
    @mock.patch('six.moves.builtins.open')
    @mock.patch('os.path.abspath')
    @mock.patch('yaml.load')
    def test_update_out(self, mock_yaml, mock_abspath, mock_open, mock_update,
                        mock_logger, mock_get_stack):
        mock_stack = mock.Mock()
        mock_stack.stack_name = 'mystack'
        mock_get_stack.return_value = mock_stack
        # mock_logger.return_value = mock.Mock()

        argslist = ['overcloud', '-i', '--templates']
        mock_abspath.return_value = '/home/fake/my-fake-registry.yaml'
        mock_yaml.return_value = {'fake_container': 'fake_value'}
        argslist = ['--stack', 'overcloud', '--init-minor-update',
                    '--container-registry-file', 'my-fake-registry.yaml']
        verifylist = [
            ('stack', 'overcloud'),
            ('interactive', True),
            ('templates', '/usr/share/openstack-tripleo-heat-templates/')
            ('init_minor_update', True),
            ('container_registry_file', 'my-fake-registry.yaml')
        ]

        parsed_args = self.check_parser(self.cmd, argslist, verifylist)
        self.cmd.take_action(parsed_args)
        mock_update_wait.assert_called_once_with(
            mock_logger,
        mock_update.assert_called_once_with(
            self.app.client_manager,
            mock_stack, 'mystack', 1, 0)
            container='mystack',
            container_registry={'fake_container': 'fake_value'},
            queue_name=str(uuid.uuid4()))

    @mock.patch('tripleoclient.workflows.package_update.update_and_wait',
    @mock.patch('tripleoclient.workflows.package_update.update',
                autospec=True)
    def test_update_failed(self, mock_update_wait):
        mock_update_wait.return_value = 'FAILED'
        argslist = ['overcloud', '-i', '--templates']
    @mock.patch('six.moves.builtins.open')
    @mock.patch('os.path.abspath')
    @mock.patch('yaml.load')
    def test_update_failed(self, mock_yaml, mock_abspath, mock_open,
                           mock_update):
        mock_update.side_effect = exceptions.DeploymentError()
        mock_abspath.return_value = '/home/fake/my-fake-registry.yaml'
        mock_yaml.return_value = {'fake_container': 'fake_value'}
        argslist = ['--stack', 'overcloud', '--init-minor-update',
                    '--container-registry-file', 'my-fake-registry.yaml']
        verifylist = [
            ('stack', 'overcloud'),
            ('interactive', True),
            ('templates', '/usr/share/openstack-tripleo-heat-templates/')
            ('init_minor_update', True),
            ('container_registry_file', 'my-fake-registry.yaml')
        ]
        parsed_args = self.check_parser(self.cmd, argslist, verifylist)

        self.assertRaises(exceptions.DeploymentError,
                          self.cmd.take_action, parsed_args)

    @mock.patch('tripleoclient.workflows.package_update.update_ansible',
                autospec=True)
    @mock.patch('os.path.expanduser')
    @mock.patch('oslo_concurrency.processutils.execute')
    @mock.patch('six.moves.builtins.open')
    def test_update_ansible(self, mock_open, mock_execute,
                            mock_expanduser, update_ansible):
        mock_expanduser.return_value = '/home/fake/'
        argslist = ['--stack', 'overcloud', '--nodes', 'Compute', '--playbook',
                    'fake-playbook.yaml']
        verifylist = [
            ('stack', 'overcloud'),
            ('nodes', 'Compute'),
            ('generate_inventory', True),
            ('static_inventory', 'tripleo-hosts-inventory'),
            ('playbook', 'fake-playbook.yaml')
        ]

        parsed_args = self.check_parser(self.cmd, argslist, verifylist)
        with mock.patch('os.path.exists') as mock_exists:
            mock_exists.return_value = True
            self.cmd.take_action(parsed_args)
            update_ansible.assert_called_once_with(
                self.app.client_manager,
                nodes='Compute',
                inventory_file=mock_open().read(),
                playbook='fake-playbook.yaml',
                queue_name=str(uuid.uuid4()))

@@ -12,15 +12,11 @@

import logging
import os
import re
import six
import tempfile
import yaml

from osc_lib.command import command
from osc_lib.i18n import _

from tripleoclient import utils
from tripleo_common.utils import config as ooo_config


class DownloadConfig(command.Command):
@@ -49,107 +45,20 @@ class DownloadConfig(command.Command):
            '--config-type',
            dest='config_type',
            type=list,
            default=None,
            help=_('Type of object config to be extract from the deployment, '
                   'defaults to all keys available'),
        )
        return parser

    @staticmethod
    def _open_file(path):
        return os.fdopen(os.open(path,
                                 os.O_WRONLY | os.O_CREAT, 0o600),
                         'w')

    def _step_tags_to_when(self, sorted_tasks):
        for task in sorted_tasks:
            tag = task.get('tags', '')
            match = re.search('step([0-9]+)', tag)
            if match:
                step = match.group(1)
                whenexpr = task.get('when', None)
                if whenexpr:
                    # Handle when: foo and a list of when conditionals
                    if not isinstance(whenexpr, list):
                        whenexpr = [whenexpr]
                    for w in whenexpr:
                        when_exists = re.search('step|int == [0-9]', w)
                        if when_exists:
                            break
                    if when_exists:
                        # Skip to the next task,
                        # there is an existing 'step|int == N'
                        continue
                    whenexpr.append("step|int == %s" % step)
                    task['when'] = whenexpr
                else:
                    task.update({"when": "step|int == %s" % step})

    def _write_playbook_get_tasks(self, tasks, role, filepath):
        playbook = []
        sorted_tasks = sorted(tasks, key=lambda x: x.get('tags', None))
        self._step_tags_to_when(sorted_tasks)
        playbook.append({'name': '%s playbook' % role,
                         'hosts': role,
                         'tasks': sorted_tasks})
        with self._open_file(filepath) as conf_file:
            yaml.safe_dump(playbook, conf_file, default_flow_style=False)
        return sorted_tasks

    def _mkdir(self, dirname):
        if not os.path.exists(dirname):
            try:
                os.mkdir(dirname, 0o700)
            except OSError as e:
                message = 'Failed to create: %s, error: %s' % (dirname,
                                                               str(e))
                raise OSError(message)

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        # Get clients
        clients = self.app.client_manager

        name = parsed_args.name
        config_dir = parsed_args.config_dir
        self._mkdir(config_dir)
        stack = utils.get_stack(clients.orchestration, name)
        tmp_path = tempfile.mkdtemp(prefix='tripleo-',
                                    suffix='-config',
                                    dir=config_dir)
        self.log.info("Generating configuration under the directory: "
                      "%s" % tmp_path)
        role_data = utils.get_role_data(stack)
        for role_name, role in six.iteritems(role_data):
            role_path = os.path.join(tmp_path, role_name)
            self._mkdir(role_path)
            for config in parsed_args.config_type or role.keys():
                if config == 'step_config':
                    filepath = os.path.join(role_path, 'step_config.pp')
                    with self._open_file(filepath) as step_config:
                        step_config.write('\n'.join(step for step in
                                                    role[config]
                                                    if step is not None))
                else:
                    if 'upgrade_tasks' in config:
                        filepath = os.path.join(role_path, '%s_playbook.yaml' %
                                                config)
                        data = self._write_playbook_get_tasks(
                            role[config], role_name, filepath)
                    else:
                        try:
                            data = role[config]
                        except KeyError as e:
                            message = 'Invalid key: %s, error: %s' % (config,
                                                                      str(e))
                            raise KeyError(message)
                    filepath = os.path.join(role_path, '%s.yaml' % config)
                    with self._open_file(filepath) as conf_file:
                        yaml.safe_dump(data,
                                       conf_file,
                                       default_flow_style=False)
        role_config = utils.get_role_config(stack)
        for config_name, config in six.iteritems(role_config):
            conf_path = os.path.join(tmp_path, config_name + ".yaml")
            with self._open_file(conf_path) as conf_file:
                conf_file.write(config)
        print("The TripleO configuration has been successfully generated "
              "into: {0}".format(tmp_path))
        config_type = parsed_args.config_type
        # Get config
        config = ooo_config.Config(clients.orchestration)
        return config.download_config(name, config_dir, config_type)

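A hedged sketch of the manual path enabled by the slimmed-down DownloadConfig above, which now delegates to tripleo_common's Config.download_config. The command name and arguments follow the tests in this change; the inventory location, generated directory name and node role are illustrative assumptions:

    # Download the stack's config and ansible playbooks.
    openstack overcloud config download --name overcloud --config-dir /tmp/config

    # Generate a static inventory (same helper that UpdateOvercloud calls) and
    # run a generated playbook by hand from the undercloud.
    /bin/tripleo-ansible-inventory --static-inventory ~/tripleo-hosts-inventory
    ansible-playbook -i ~/tripleo-hosts-inventory \
        /tmp/config/<generated-dir>/update_steps_playbook.yaml --limit Compute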
@@ -14,11 +14,13 @@
#

import logging
import os
import uuid
import yaml

from osc_lib.command import command
from osc_lib.i18n import _
from osc_lib import utils
from oslo_concurrency import processutils

from tripleoclient import constants
from tripleoclient import exceptions
@@ -33,36 +35,54 @@ class UpdateOvercloud(command.Command):

    def get_parser(self, prog_name):
        parser = super(UpdateOvercloud, self).get_parser(prog_name)
        parser.add_argument('stack', nargs='?',
        parser.add_argument('--stack',
                            nargs='?',
                            dest='stack',
                            help=_('Name or ID of heat stack to scale '
                                   '(default=Env: OVERCLOUD_STACK_NAME)'),
                            default=utils.env('OVERCLOUD_STACK_NAME'))
        parser.add_argument(
            '--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES,
            help=_("The directory containing the Heat templates to deploy. "
                   "This argument is deprecated. The command now utilizes "
                   "a deployment plan, which should be updated prior to "
                   "running this command, should that be required. Otherwise "
                   "this argument will be silently ignored."),
        )
        parser.add_argument('-i', '--interactive', dest='interactive',
                            action='store_true')
        parser.add_argument(
            '-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
            action='append', dest='environment_files',
            help=_("Environment files to be passed to the heat stack-create "
                   "or heat stack-update command. (Can be specified more than "
                   "once.) This argument is deprecated. The command now "
                   "utilizes a deployment plan, which should be updated prior "
                   "to running this command, should that be required. "
                   "Otherwise this argument will be silently ignored."),
        )
        parser.add_argument(
            '--answers-file',
            help=_('Path to a YAML file with arguments and parameters. '
                   'DEPRECATED. Not necessary when used with a plan. Will '
                   'be silently ignored, and removed in the "P" release.')
        )
                            default='overcloud'
                            )
        parser.add_argument('--templates',
                            nargs='?',
                            default=constants.TRIPLEO_HEAT_TEMPLATES,
                            help=_("The directory containing the Heat"
                                   "templates to deploy. "),
                            )
        parser.add_argument('--init-minor-update',
                            dest='init_minor_update',
                            action='store_true',
                            help=_("Init the minor update heat config output."
                                   "Needs to be run only once"),
                            )
        parser.add_argument('--container-registry-file',
                            dest='container_registry_file',
                            default=None,
                            help=_("File which contains the container "
                                   "registry data for the update"),
                            )
        parser.add_argument('--nodes',
                            action="store",
                            default=None,
                            help=_('Nodes to update.')
                            )
        parser.add_argument('--playbook',
                            action="store",
                            default="update_steps_playbook.yaml",
                            help=_('Playbook to use for update')
                            )
        parser.add_argument('--generate-inventory',
                            dest='generate_inventory',
                            action='store_true',
                            default=True,
                            help=_("Generate inventory for the ansible "
                                   "playbook"),
                            )
        parser.add_argument('--static-inventory',
                            dest='static_inventory',
                            action="store",
                            default='tripleo-hosts-inventory',
                            help=_('Path to the static inventory to use')
                            )
        return parser

    def take_action(self, parsed_args):
@@ -73,47 +93,47 @@ class UpdateOvercloud(command.Command):
                                   parsed_args.stack)

        stack_name = stack.stack_name
        if parsed_args.interactive:
            timeout = 0
        container_registry = parsed_args.container_registry_file

            status = package_update.update_and_wait(
                self.log, clients, stack, stack_name,
                self.app_args.verbose_level, timeout)
            if status not in ['COMPLETE']:
                raise exceptions.DeploymentError("Package update failed.")
        else:
        if parsed_args.init_minor_update:
            # Update the container registry:
            if container_registry:
                with open(os.path.abspath(container_registry)) as content:
                    registry = yaml.load(content.read())
            else:
                raise exceptions.InvalidConfiguration(
                    "You need to provide a container registry file in order "
                    "to update your current containers deployed.")
            # Execute minor update
            package_update.update(clients, container=stack_name,
                                  container_registry=registry,
                                  queue_name=str(uuid.uuid4()))
            print("Package update on stack {0} initiated.".format(
                parsed_args.stack))


class ClearBreakpointsOvercloud(command.Command):
    """Clears a set of breakpoints on a currently updating overcloud"""

    log = logging.getLogger(__name__ + ".ClearBreakpointsOvercloud")

    def get_parser(self, prog_name):
        parser = super(ClearBreakpointsOvercloud, self).get_parser(prog_name)
        parser.add_argument('stack', nargs='?',
                            help=_('Name or ID of heat stack to clear a '
                                   'breakpoint or set of breakpoints '
                                   '(default=Env: OVERCLOUD_STACK_NAME)'),
                            default=utils.env('OVERCLOUD_STACK_NAME'))
        parser.add_argument('--ref',
                            action='append',
                            dest='refs',
                            help=_('Breakpoint to clear'))

        return parser

    def take_action(self, parsed_args):
        self.log.debug("take_action(%s)" % parsed_args)
        clients = self.app.client_manager

        heat = clients.orchestration

        stack = oooutils.get_stack(heat, parsed_args.stack)

        package_update.clear_breakpoints(clients, stack_id=stack.id,
                                         refs=parsed_args.refs)
            print("Minor update init on stack {0} complete.".format(
                parsed_args.stack))
        # Run ansible:
        nodes = parsed_args.nodes
        playbook = parsed_args.playbook
        if nodes is not None:
            inventory_path = '%s/%s' % (os.path.expanduser('~'),
                                        parsed_args.static_inventory)
            if parsed_args.generate_inventory:
                try:
                    processutils.execute('/bin/tripleo-ansible-inventory',
                                         '--static-inventory', inventory_path)
                except processutils.ProcessExecutionError as e:
                    message = "Failed to generate inventory file: %s" % str(e)
                    raise exceptions.InvalidConfiguration(message)
            if os.path.exists(inventory_path):
                inventory = open(inventory_path, 'r').read()
            else:
                raise exceptions.InvalidConfiguration(
                    "Inventory file missing, provide an inventory file or "
                    "generate an inventory by using the --generate-inventory "
                    "option")
            output = package_update.update_ansible(
                clients, nodes=nodes,
                inventory_file=inventory,
                playbook=playbook,
                queue_name=str(uuid.uuid4()))
            print (output)

@@ -12,11 +12,13 @@
from __future__ import print_function

import pprint
import uuid
import time

from tripleo_common import update as update_common
from heatclient.common import event_utils
from openstackclient import shell
from tripleoclient import exceptions
from tripleoclient import utils

from tripleoclient import utils as oooutils
from tripleoclient.workflows import base


@@ -24,6 +26,7 @@ def update(clients, **workflow_input):
    workflow_client = clients.workflow_engine
    tripleoclients = clients.tripleoclient
    queue_name = workflow_input['queue_name']
    plan_name = workflow_input['container']

    with tripleoclients.messaging_websocket(queue_name) as ws:
        execution = base.start_workflow(
@@ -35,48 +38,31 @@ def update(clients, **workflow_input):
        for payload in base.wait_for_messages(workflow_client, ws, execution):
            assert payload['status'] == "SUCCESS", pprint.pformat(payload)

    orchestration_client = clients.orchestration

def update_and_wait(log, clients, stack, plan_name, verbose_level,
                    timeout=None):
    """Start the update and wait for it to give breakpoints or finish"""
    events = event_utils.get_events(orchestration_client,
                                    stack_id=plan_name,
                                    event_args={'sort_dir': 'desc',
                                                'limit': 1})
    marker = events[0].id if events else None

    log.info("Performing Heat stack update")
    queue_name = str(uuid.uuid4())

    workflow_input = {
        "container": plan_name,
        "queue_name": queue_name,
    }

    if timeout is not None:
        workflow_input['timeout'] = timeout

    update(clients, **workflow_input)

    update_manager = update_common.PackageUpdateManager(
        heatclient=clients.orchestration,
        novaclient=clients.compute,
        stack_id=plan_name,
        stack_fields={})

    update_manager.do_interactive_update()

    stack = oooutils.get_stack(clients.orchestration,
                               plan_name)

    return stack.status
    time.sleep(10)
    create_result = utils.wait_for_stack_ready(
        orchestration_client, plan_name, marker, 'UPDATE', 1)
    if not create_result:
        shell.OpenStackShell().run(["stack", "failures", "list", plan_name])
        raise exceptions.DeploymentError("Heat Stack update failed.")


def clear_breakpoints(clients, **workflow_input):
def update_ansible(clients, **workflow_input):
    workflow_client = clients.workflow_engine
    tripleoclients = clients.tripleoclient
    workflow_input['queue_name'] = str(uuid.uuid4())
    queue_name = workflow_input['queue_name']

    with tripleoclients.messaging_websocket(queue_name) as ws:
        execution = base.start_workflow(
            workflow_client,
            'tripleo.package_update.v1.clear_breakpoints',
            'tripleo.package_update.v1.update_nodes',
            workflow_input=workflow_input
        )