Save needed stack outputs in working_dir

With ephemeral-heat, we need to save the required stack outputs in the
working_dir so that the values remain available even when the stack is
no longer around.

With the outputs saved, the stack object no longer needs to be passed to
workflows.deployment.config_download and
workflows.deployment.get_hosts_and_enable_ssh_admin, only the stack name
and output values are needed.

Signed-off-by: James Slagle <jslagle@redhat.com>
Change-Id: I3cc61bfee94227045909a4b0ccf84a8d595b2cea
This commit is contained in:
James Slagle 2021-08-03 17:52:59 -04:00 committed by Alex Schultz
parent b7c9bc1cd7
commit 276a6def10
10 changed files with 87 additions and 59 deletions

View File

@ -0,0 +1,5 @@
---
other:
- Stack outputs that are needed by other functionality of the overcloud
deployment are now saved in the stack working directory in an outputs
subdirectory (default ~/overcloud-deploy/<stack>/outputs).

View File

@ -85,6 +85,9 @@ PUPPET_MODULES = "/etc/puppet/modules/"
PUPPET_BASE = "/etc/puppet/" PUPPET_BASE = "/etc/puppet/"
STACK_TIMEOUT = 240 STACK_TIMEOUT = 240
STACK_OUTPUTS = ['BlacklistedHostnames',
'RoleNetIpMap',
'BlacklistedIpAddresses']
IRONIC_HTTP_BOOT_BIND_MOUNT = '/var/lib/ironic/httpboot' IRONIC_HTTP_BOOT_BIND_MOUNT = '/var/lib/ironic/httpboot'
IRONIC_LOCAL_IMAGE_PATH = '/var/lib/ironic/images' IRONIC_LOCAL_IMAGE_PATH = '/var/lib/ironic/images'

View File

@ -325,7 +325,8 @@ class TestDeployOvercloud(fakes.TestDeployOvercloud):
clients = self.app.client_manager clients = self.app.client_manager
orchestration_client = clients.orchestration orchestration_client = clients.orchestration
mock_stack = fakes.create_tht_stack() mock_stack = fakes.create_tht_stack()
orchestration_client.stacks.get.side_effect = [None, mock_stack] orchestration_client.stacks.get.side_effect = \
[None, mock_stack, mock_stack]
def _orch_clt_create(**kwargs): def _orch_clt_create(**kwargs):
orchestration_client.stacks.get.return_value = mock_stack orchestration_client.stacks.get.return_value = mock_stack
@ -1421,7 +1422,7 @@ class TestDeployOvercloud(fakes.TestDeployOvercloud):
deployment_timeout=448, # 451 - 3, total time left deployment_timeout=448, # 451 - 3, total time left
in_flight_validations=False, limit_hosts=None, in_flight_validations=False, limit_hosts=None,
skip_tags=None, tags=None, timeout=42, skip_tags=None, tags=None, timeout=42,
verbosity=3, forks=None)], verbosity=3, forks=None, denyed_hostnames=None)],
fixture.mock_config_download.mock_calls) fixture.mock_config_download.mock_calls)
fixture.mock_config_download.assert_called() fixture.mock_config_download.assert_called()
mock_copy.assert_called_once() mock_copy.assert_called_once()

View File

@ -66,6 +66,7 @@ class TestDeploymentWorkflows(utils.TestCommand):
def test_get_overcloud_hosts(self, mock_role_net_ip_map, def test_get_overcloud_hosts(self, mock_role_net_ip_map,
mock_blacklisted_ip_addresses): mock_blacklisted_ip_addresses):
stack = mock.Mock() stack = mock.Mock()
working_dir = mock.Mock()
mock_role_net_ip_map.return_value = { mock_role_net_ip_map.return_value = {
'Controller': { 'Controller': {
'ctlplane': ['1.1.1.1', '2.2.2.2', '3.3.3.3'], 'ctlplane': ['1.1.1.1', '2.2.2.2', '3.3.3.3'],
@ -76,12 +77,12 @@ class TestDeploymentWorkflows(utils.TestCommand):
} }
mock_blacklisted_ip_addresses.return_value = [] mock_blacklisted_ip_addresses.return_value = []
ips = deployment.get_overcloud_hosts(stack, 'ctlplane') ips = deployment.get_overcloud_hosts(stack, 'ctlplane', working_dir)
expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3', expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3',
'7.7.7.7', '8.8.8.8', '9.9.9.9'] '7.7.7.7', '8.8.8.8', '9.9.9.9']
self.assertEqual(sorted(expected), sorted(ips)) self.assertEqual(sorted(expected), sorted(ips))
ips = deployment.get_overcloud_hosts(stack, 'external') ips = deployment.get_overcloud_hosts(stack, 'external', working_dir)
expected = ['4.4.4.4', '5.5.5.5', '6.6.6.6', expected = ['4.4.4.4', '5.5.5.5', '6.6.6.6',
'10.10.10.10', '11.11.11.11', '12.12.12.12'] '10.10.10.10', '11.11.11.11', '12.12.12.12']
self.assertEqual(sorted(expected), sorted(ips)) self.assertEqual(sorted(expected), sorted(ips))
@ -92,6 +93,7 @@ class TestDeploymentWorkflows(utils.TestCommand):
self, mock_role_net_ip_map, self, mock_role_net_ip_map,
mock_blacklisted_ip_addresses): mock_blacklisted_ip_addresses):
stack = mock.Mock() stack = mock.Mock()
working_dir = mock.Mock()
stack.output_show.return_value = [] stack.output_show.return_value = []
mock_role_net_ip_map.return_value = { mock_role_net_ip_map.return_value = {
'Controller': { 'Controller': {
@ -103,19 +105,19 @@ class TestDeploymentWorkflows(utils.TestCommand):
} }
mock_blacklisted_ip_addresses.return_value = ['8.8.8.8'] mock_blacklisted_ip_addresses.return_value = ['8.8.8.8']
ips = deployment.get_overcloud_hosts(stack, 'ctlplane') ips = deployment.get_overcloud_hosts(stack, 'ctlplane', working_dir)
expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3', expected = ['1.1.1.1', '2.2.2.2', '3.3.3.3',
'7.7.7.7', '9.9.9.9'] '7.7.7.7', '9.9.9.9']
self.assertEqual(sorted(expected), sorted(ips)) self.assertEqual(sorted(expected), sorted(ips))
ips = deployment.get_overcloud_hosts(stack, 'external') ips = deployment.get_overcloud_hosts(stack, 'external', working_dir)
expected = ['4.4.4.4', '5.5.5.5', '6.6.6.6', expected = ['4.4.4.4', '5.5.5.5', '6.6.6.6',
'10.10.10.10', '12.12.12.12'] '10.10.10.10', '12.12.12.12']
self.assertEqual(sorted(expected), sorted(ips)) self.assertEqual(sorted(expected), sorted(ips))
mock_blacklisted_ip_addresses.return_value = ['7.7.7.7', '9.9.9.9', mock_blacklisted_ip_addresses.return_value = ['7.7.7.7', '9.9.9.9',
'2.2.2.2'] '2.2.2.2']
ips = deployment.get_overcloud_hosts(stack, 'external') ips = deployment.get_overcloud_hosts(stack, 'external', working_dir)
expected = ['4.4.4.4', '6.6.6.6', '11.11.11.11'] expected = ['4.4.4.4', '6.6.6.6', '11.11.11.11']
self.assertEqual(sorted(expected), sorted(ips)) self.assertEqual(sorted(expected), sorted(ips))
@ -129,7 +131,7 @@ class TestDeploymentWorkflows(utils.TestCommand):
stack.output_show.return_value = {'output': {'output_value': []}} stack.output_show.return_value = {'output': {'output_value': []}}
clients = mock.Mock() clients = mock.Mock()
deployment.config_download( deployment.config_download(
log, clients, stack, 'templates', 'ssh_user', log, clients, 'stacktest', 'templates', 'ssh_user',
'ssh_key', 'ssh_networks', 'output_dir', False, 'ssh_key', 'ssh_networks', 'output_dir', False,
'timeout') 'timeout')

View File

@ -987,6 +987,15 @@ def get_stack_output_item(stack, item):
return None return None
def get_stack_saved_output_item(output, working_dir):
    """Load a previously saved stack output value from the working dir.

    Looks for <working_dir>/outputs/<output> and returns its YAML
    contents, or None when the named output was never saved.
    """
    output_path = os.path.join(working_dir, 'outputs', output)
    if not os.path.isfile(output_path):
        return None
    with open(output_path) as saved:
        return yaml.safe_load(saved)
def get_overcloud_endpoint(stack): def get_overcloud_endpoint(stack):
return get_stack_output_item(stack, 'KeystoneURL') return get_stack_output_item(stack, 'KeystoneURL')
@ -1005,12 +1014,14 @@ def get_endpoint_map(stack):
return endpoint_map return endpoint_map
def get_blacklisted_ip_addresses(stack): def get_blacklisted_ip_addresses(working_dir):
return get_stack_output_item(stack, 'BlacklistedIpAddresses') return get_stack_saved_output_item(
'BlacklistedIpAddresses', working_dir)
def get_role_net_ip_map(stack): def get_role_net_ip_map(working_dir):
return get_stack_output_item(stack, 'RoleNetIpMap') return get_stack_saved_output_item(
'RoleNetIpMap', working_dir)
def get_endpoint(key, stack): def get_endpoint(key, stack):
@ -3182,3 +3193,13 @@ def parse_ansible_inventory(inventory_file, group):
sources=[inventory_file]) sources=[inventory_file])
return(inventory.get_hosts(pattern=group)) return(inventory.get_hosts(pattern=group))
def save_stack_outputs(heat, stack, working_dir):
    """Persist selected stack outputs into the working directory.

    Each output named in constants.STACK_OUTPUTS is written as YAML to
    its own file under <working_dir>/outputs so the values can be read
    back later, even after the (ephemeral) stack is gone.

    NOTE(review): the heat argument is unused in this body — presumably
    kept for caller-interface compatibility; confirm before removing.
    """
    outputs_dir = os.path.join(working_dir, 'outputs')
    makedirs(outputs_dir)
    for name in constants.STACK_OUTPUTS:
        value = get_stack_output_item(stack, name)
        with open(os.path.join(outputs_dir, name), 'w') as out:
            out.write(yaml.dump(value))

View File

@ -1146,6 +1146,11 @@ class DeployOvercloud(command.Command):
self.deploy_tripleo_heat_templates( self.deploy_tripleo_heat_templates(
stack, parsed_args, new_tht_root, stack, parsed_args, new_tht_root,
user_tht_root, created_env_files) user_tht_root, created_env_files)
stack = utils.get_stack(
self.orchestration_client, parsed_args.stack)
utils.save_stack_outputs(
self.orchestration_client, stack, self.working_dir)
except Exception: except Exception:
if parsed_args.heat_type != 'installed' and self.heat_launcher: if parsed_args.heat_type != 'installed' and self.heat_launcher:
self.log.info("Stopping ephemeral heat.") self.log.info("Stopping ephemeral heat.")
@ -1153,9 +1158,10 @@ class DeployOvercloud(command.Command):
utils.rm_heat(self.heat_launcher, backup_db=True) utils.rm_heat(self.heat_launcher, backup_db=True)
raise raise
# Get a new copy of the stack after stack update/create. If it was # Get a new copy of the stack after stack update/create. If it
# a create then the previous stack object would be None. # was a create then the previous stack object would be None.
stack = utils.get_stack(self.orchestration_client, parsed_args.stack) stack = utils.get_stack(
self.orchestration_client, parsed_args.stack)
overcloud_endpoint = None overcloud_endpoint = None
old_rcpath = None old_rcpath = None
@ -1195,7 +1201,7 @@ class DeployOvercloud(command.Command):
if not parsed_args.config_download_only: if not parsed_args.config_download_only:
deployment.get_hosts_and_enable_ssh_admin( deployment.get_hosts_and_enable_ssh_admin(
stack, parsed_args.stack,
parsed_args.overcloud_ssh_network, parsed_args.overcloud_ssh_network,
parsed_args.overcloud_ssh_user, parsed_args.overcloud_ssh_user,
self.get_key_pair(parsed_args), self.get_key_pair(parsed_args),
@ -1226,7 +1232,7 @@ class DeployOvercloud(command.Command):
deployment.config_download( deployment.config_download(
self.log, self.log,
self.clients, self.clients,
stack, parsed_args.stack,
parsed_args.overcloud_ssh_network, parsed_args.overcloud_ssh_network,
config_download_dir, config_download_dir,
parsed_args.override_ansible_cfg, parsed_args.override_ansible_cfg,
@ -1240,7 +1246,9 @@ class DeployOvercloud(command.Command):
limit_hosts=utils.playbook_limit_parse( limit_hosts=utils.playbook_limit_parse(
limit_nodes=parsed_args.limit limit_nodes=parsed_args.limit
), ),
forks=parsed_args.ansible_forks) forks=parsed_args.ansible_forks,
denyed_hostnames=utils.get_stack_saved_output_item(
'BlacklistedHostnames', self.working_dir))
deployment.set_deployment_status( deployment.set_deployment_status(
stack.stack_name, stack.stack_name,
status=deploy_status, status=deploy_status,
@ -1272,7 +1280,7 @@ class DeployOvercloud(command.Command):
deploy_status = 'DEPLOY_FAILED' deploy_status = 'DEPLOY_FAILED'
deploy_message = 'with error' deploy_message = 'with error'
deployment.set_deployment_status( deployment.set_deployment_status(
stack.stack_name, parsed_args.stack,
status=deploy_status, status=deploy_status,
working_dir=self.working_dir) working_dir=self.working_dir)
finally: finally:

View File

@ -138,10 +138,7 @@ class ExternalUpdateRun(command.Command):
deployment.config_download( deployment.config_download(
log=self.log, log=self.log,
clients=self.app.client_manager, clients=self.app.client_manager,
stack=oooutils.get_stack( stack_name=parsed_args.stack,
self.app.client_manager.orchestration,
parsed_args.stack
),
output_dir=ansible_dir, output_dir=ansible_dir,
verbosity=oooutils.playbook_verbosity(self=self), verbosity=oooutils.playbook_verbosity(self=self),
ansible_playbook_name=constants.EXTERNAL_UPDATE_PLAYBOOKS, ansible_playbook_name=constants.EXTERNAL_UPDATE_PLAYBOOKS,

View File

@ -130,10 +130,7 @@ class ExternalUpgradeRun(command.Command):
deployment.config_download( deployment.config_download(
log=self.log, log=self.log,
clients=self.app.client_manager, clients=self.app.client_manager,
stack=oooutils.get_stack( stack_name=parsed_args.stack,
self.app.client_manager.orchestration,
parsed_args.stack
),
output_dir=ansible_dir, output_dir=ansible_dir,
verbosity=oooutils.playbook_verbosity(self=self), verbosity=oooutils.playbook_verbosity(self=self),
ansible_playbook_name=constants.EXTERNAL_UPGRADE_PLAYBOOKS, ansible_playbook_name=constants.EXTERNAL_UPGRADE_PLAYBOOKS,

View File

@ -229,10 +229,7 @@ class UpgradeRun(command.Command):
deployment.config_download( deployment.config_download(
log=self.log, log=self.log,
clients=self.app.client_manager, clients=self.app.client_manager,
stack=oooutils.get_stack( stack_name=parsed_args.stack,
self.app.client_manager.orchestration,
parsed_args.stack
),
output_dir=ansible_dir, output_dir=ansible_dir,
verbosity=oooutils.playbook_verbosity(self=self), verbosity=oooutils.playbook_verbosity(self=self),
ansible_playbook_name=playbook, ansible_playbook_name=playbook,

View File

@ -103,10 +103,10 @@ def deploy_without_plan(clients, stack, stack_name, template,
raise exceptions.DeploymentError("Heat Stack update failed.") raise exceptions.DeploymentError("Heat Stack update failed.")
def get_overcloud_hosts(stack, ssh_network): def get_overcloud_hosts(stack, ssh_network, working_dir):
ips = [] ips = []
role_net_ip_map = utils.get_role_net_ip_map(stack) role_net_ip_map = utils.get_role_net_ip_map(working_dir)
blacklisted_ips = utils.get_blacklisted_ip_addresses(stack) blacklisted_ips = utils.get_blacklisted_ip_addresses(working_dir)
if not role_net_ip_map: if not role_net_ip_map:
raise exceptions.DeploymentError( raise exceptions.DeploymentError(
'No overcloud hosts were found in the current stack.' 'No overcloud hosts were found in the current stack.'
@ -136,7 +136,7 @@ def get_overcloud_hosts(stack, ssh_network):
return ips return ips
def get_hosts_and_enable_ssh_admin(stack, overcloud_ssh_network, def get_hosts_and_enable_ssh_admin(stack_name, overcloud_ssh_network,
overcloud_ssh_user, overcloud_ssh_key, overcloud_ssh_user, overcloud_ssh_key,
overcloud_ssh_port_timeout, overcloud_ssh_port_timeout,
working_dir, verbosity=0, working_dir, verbosity=0,
@ -146,8 +146,8 @@ def get_hosts_and_enable_ssh_admin(stack, overcloud_ssh_network,
Get a list of hosts from a given stack and enable admin ssh across all of Get a list of hosts from a given stack and enable admin ssh across all of
them. them.
:param stack: Stack data. :param stack_name: Stack name.
:type stack: Object :type stack_name: String
:param overcloud_ssh_network: Network id. :param overcloud_ssh_network: Network id.
:type overcloud_ssh_network: String :type overcloud_ssh_network: String
@ -165,10 +165,10 @@ def get_hosts_and_enable_ssh_admin(stack, overcloud_ssh_network,
:type verbosity: Integer :type verbosity: Integer
""" """
hosts = get_overcloud_hosts(stack, overcloud_ssh_network) hosts = get_overcloud_hosts(stack_name, overcloud_ssh_network, working_dir)
if [host for host in hosts if host]: if [host for host in hosts if host]:
enable_ssh_admin( enable_ssh_admin(
stack, stack_name,
hosts, hosts,
overcloud_ssh_user, overcloud_ssh_user,
overcloud_ssh_key, overcloud_ssh_key,
@ -180,18 +180,18 @@ def get_hosts_and_enable_ssh_admin(stack, overcloud_ssh_network,
else: else:
raise exceptions.DeploymentError( raise exceptions.DeploymentError(
'Cannot find any hosts on "{}" in network "{}"'.format( 'Cannot find any hosts on "{}" in network "{}"'.format(
stack.stack_name, stack_name,
overcloud_ssh_network overcloud_ssh_network
) )
) )
def enable_ssh_admin(stack, hosts, ssh_user, ssh_key, timeout, def enable_ssh_admin(stack_name, hosts, ssh_user, ssh_key, timeout,
working_dir, verbosity=0, heat_type='installed'): working_dir, verbosity=0, heat_type='installed'):
"""Run enable ssh admin access playbook. """Run enable ssh admin access playbook.
:param stack: Stack data. :param stack_name: Stack name.
:type stack: Object :type stack_name: String
:param hosts: Machines to connect to. :param hosts: Machines to connect to.
:type hosts: List :type hosts: List
@ -237,7 +237,7 @@ def enable_ssh_admin(stack, hosts, ssh_user, ssh_key, timeout,
extra_vars={ extra_vars={
"ssh_user": ssh_user, "ssh_user": ssh_user,
"ssh_servers": hosts, "ssh_servers": hosts,
'tripleo_cloud_name': stack.stack_name 'tripleo_cloud_name': stack_name
}, },
ansible_timeout=timeout ansible_timeout=timeout
) )
@ -247,14 +247,15 @@ def enable_ssh_admin(stack, hosts, ssh_user, ssh_key, timeout,
print("Enabling ssh admin - COMPLETE.") print("Enabling ssh admin - COMPLETE.")
def config_download(log, clients, stack, ssh_network='ctlplane', def config_download(log, clients, stack_name, ssh_network='ctlplane',
output_dir=None, override_ansible_cfg=None, output_dir=None, override_ansible_cfg=None,
timeout=600, verbosity=0, deployment_options=None, timeout=600, verbosity=0, deployment_options=None,
in_flight_validations=False, in_flight_validations=False,
ansible_playbook_name='deploy_steps_playbook.yaml', ansible_playbook_name='deploy_steps_playbook.yaml',
limit_hosts=None, extra_vars=None, inventory_path=None, limit_hosts=None, extra_vars=None, inventory_path=None,
ssh_user='tripleo-admin', tags=None, skip_tags=None, ssh_user='tripleo-admin', tags=None, skip_tags=None,
deployment_timeout=None, forks=None, working_dir=None): deployment_timeout=None, forks=None, working_dir=None,
denyed_hostnames=None):
"""Run config download. """Run config download.
:param log: Logging object :param log: Logging object
@ -344,7 +345,7 @@ def config_download(log, clients, stack, ssh_network='ctlplane',
output_dir = DEFAULT_WORK_DIR output_dir = DEFAULT_WORK_DIR
if not working_dir: if not working_dir:
working_dir = utils.get_default_working_dir(stack.stack_name) working_dir = utils.get_default_working_dir(stack_name)
if not deployment_options: if not deployment_options:
deployment_options = dict() deployment_options = dict()
@ -373,22 +374,18 @@ def config_download(log, clients, stack, ssh_network='ctlplane',
_log_and_print( _log_and_print(
message='Checking for blacklisted hosts from stack: {}'.format( message='Checking for blacklisted hosts from stack: {}'.format(
stack.stack_name stack_name
), ),
logger=log, logger=log,
print_msg=(verbosity == 0) print_msg=(verbosity == 0)
) )
if not limit_hosts: if not limit_hosts:
blacklist_show = stack.output_show('BlacklistedHostnames') if denyed_hostnames:
blacklist_stack_output = blacklist_show.get('output', dict())
blacklist_stack_output_value = blacklist_stack_output.get(
'output_value')
if blacklist_stack_output_value:
limit_hosts = ( limit_hosts = (
':'.join(['!{}'.format(i) for i in blacklist_stack_output_value ':'.join(['!{}'.format(i) for i in denyed_hostnames
if i])) if i]))
key_file = utils.get_key(stack.stack_name) key_file = utils.get_key(stack_name)
python_interpreter = deployment_options.get('ansible_python_interpreter') python_interpreter = deployment_options.get('ansible_python_interpreter')
playbook = 'cli-config-download.yaml' playbook = 'cli-config-download.yaml'
@ -402,7 +399,7 @@ def config_download(log, clients, stack, ssh_network='ctlplane',
verbosity=verbosity, verbosity=verbosity,
reproduce_command=True, reproduce_command=True,
extra_vars={ extra_vars={
'plan': stack.stack_name, 'plan': stack_name,
'output_dir': output_dir, 'output_dir': output_dir,
'ansible_ssh_user': ssh_user, 'ansible_ssh_user': ssh_user,
'ansible_ssh_private_key_file': key_file, 'ansible_ssh_private_key_file': key_file,
@ -414,13 +411,13 @@ def config_download(log, clients, stack, ssh_network='ctlplane',
_log_and_print( _log_and_print(
message='Executing deployment playbook for stack: {}'.format( message='Executing deployment playbook for stack: {}'.format(
stack.stack_name stack_name
), ),
logger=log, logger=log,
print_msg=(verbosity == 0) print_msg=(verbosity == 0)
) )
stack_work_dir = os.path.join(output_dir, stack.stack_name) stack_work_dir = os.path.join(output_dir, stack_name)
if not inventory_path: if not inventory_path:
inventory_path = os.path.join(stack_work_dir, inventory_path = os.path.join(stack_work_dir,
'inventory') 'inventory')
@ -455,7 +452,7 @@ def config_download(log, clients, stack, ssh_network='ctlplane',
_log_and_print( _log_and_print(
message='Overcloud configuration completed for stack: {}'.format( message='Overcloud configuration completed for stack: {}'.format(
stack.stack_name stack_name
), ),
logger=log, logger=log,
print_msg=(verbosity == 0) print_msg=(verbosity == 0)