nova-less-deploy: deploy_instances and undeploy_instances workflows

This change adds a workflow to provision bare metal machines using the
Ironic API without Nova (through the metalsmith library for convenience).
A simple workflow to unprovision nodes is included as well.

This is the first step towards supporting deployment without Nova.

Change-Id: I7c7aeb83691865d37ebed4d6cad7524339fdb799
Implements: blueprint nova-less-deploy
Dmitry Tantsur 2018-06-20 15:28:45 +02:00
parent 2741a21e2f
commit 1ca666b2df
6 changed files with 919 additions and 3 deletions
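
For orientation, a minimal sketch of the request format these workflows accept,
assuming an environment where tripleo-common with this change is installed; the
hostnames and field values below are made-up examples.

from tripleo_common.actions import baremetal_deploy

instances = [
    {'hostname': 'compute-0', 'resource_class': 'compute',
     'traits': ['CUSTOM_GPU']},
    {'hostname': 'control-0', 'image': 'overcloud-full',
     'nics': [{'network': 'ctlplane'}], 'root_size_gb': 49},
]

# The same check the actions run before touching Ironic: hostname defaults to
# name, then the list is validated against _INSTANCES_INPUT_SCHEMA.
baremetal_deploy._validate_instances(instances)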


@@ -28,3 +28,5 @@ python-keystoneclient>=3.8.0 # Apache-2.0
keystoneauth1>=3.4.0 # Apache-2.0
tenacity>=4.4.0 # Apache-2.0
futures>=3.0.0;python_version=='2.7' or python_version=='2.6' # BSD
metalsmith>=0.8.0 # Apache-2.0
jsonschema<3.0.0,>=2.6.0 # MIT


@@ -81,6 +81,11 @@ mistral.actions =
tripleo.baremetal.validate_nodes = tripleo_common.actions.baremetal:ValidateNodes
tripleo.baremetal.get_candidate_nodes = tripleo_common.actions.baremetal:GetCandidateNodes
tripleo.baremetal.probe_node = tripleo_common.actions.baremetal:ProbeNode
tripleo.baremetal_deploy.check_existing_instances = tripleo_common.actions.baremetal_deploy:CheckExistingInstancesAction
tripleo.baremetal_deploy.deploy_node = tripleo_common.actions.baremetal_deploy:DeployNodeAction
tripleo.baremetal_deploy.reserve_nodes = tripleo_common.actions.baremetal_deploy:ReserveNodesAction
tripleo.baremetal_deploy.undeploy_instance = tripleo_common.actions.baremetal_deploy:UndeployInstanceAction
tripleo.baremetal_deploy.wait_for_deploy = tripleo_common.actions.baremetal_deploy:WaitForDeploymentAction
tripleo.config.download_config = tripleo_common.actions.config:DownloadConfigAction
tripleo.config.get_overcloud_config = tripleo_common.actions.config:GetOvercloudConfig
tripleo.container_images.prepare = tripleo_common.actions.container_images:PrepareContainerImageEnv
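
As an illustration of what these entry points give Mistral, the snippet below
resolves one of the new actions by name; it assumes tripleo-common with this
change is installed in the environment.

import pkg_resources

for ep in pkg_resources.iter_entry_points('mistral.actions'):
    if ep.name == 'tripleo.baremetal_deploy.deploy_node':
        # Loads tripleo_common.actions.baremetal_deploy:DeployNodeAction
        print(ep.load())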


@@ -0,0 +1,343 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import jsonschema
import metalsmith
from metalsmith import sources
from mistral_lib import actions
import six
from tripleo_common.actions import base
from tripleo_common.utils import keystone
LOG = logging.getLogger(__name__)
def _provisioner(context):
session = keystone.get_session(context)
return metalsmith.Provisioner(session=session)
_INSTANCES_INPUT_SCHEMA = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'capabilities': {'type': 'object'},
'hostname': {'type': 'string',
'minLength': 2,
'maxLength': 255},
'image': {'type': 'string'},
'image_checksum': {'type': 'string'},
'image_kernel': {'type': 'string'},
'image_ramdisk': {'type': 'string'},
'name': {'type': 'string'},
'nics': {'type': 'array',
'items': {'type': 'object',
'properties': {
'network': {'type': 'string'},
'port': {'type': 'string'},
'fixed_ip': {'type': 'string'},
},
'additionalProperties': False}},
'profile': {'type': 'string'},
'resource_class': {'type': 'string'},
'root_size_gb': {'type': 'integer', 'minimum': 4},
'swap_size_mb': {'type': 'integer', 'minimum': 64},
'traits': {'type': 'array',
'items': {'type': 'string'}},
},
'additionalProperties': False,
# Host name is required, but defaults to name in _validate_instances
'required': ['hostname'],
}
}
"""JSON schema of the input for these actions."""
class CheckExistingInstancesAction(base.TripleOAction):
"""Detect which requested instances have already been provisioned."""
def __init__(self, instances):
super(CheckExistingInstancesAction, self).__init__()
self.instances = instances
def run(self, context):
try:
_validate_instances(self.instances)
except Exception as exc:
LOG.error('Failed to validate provided instances. %s', exc)
return actions.Result(error=six.text_type(exc))
provisioner = _provisioner(context)
not_found = []
found = []
for request in self.instances:
try:
instance = provisioner.show_instance(request['hostname'])
# TODO(dtantsur): use openstacksdk exceptions when metalsmith
# is bumped to 0.9.0.
except Exception:
not_found.append(request)
else:
found.append(instance.to_dict())
if found:
LOG.info('Found existing instances: %s',
', '.join('%s (on node %s)' % (i['hostname'], i['uuid'])
for i in found))
if not_found:
LOG.info('Instance(s) %s do not exist',
', '.join(r['hostname'] for r in not_found))
return {
'not_found': not_found,
'instances': found
}
class ReserveNodesAction(base.TripleOAction):
"""Reserve nodes for requested instances."""
def __init__(self, instances, default_resource_class='baremetal'):
super(ReserveNodesAction, self).__init__()
self.instances = instances
self.default_resource_class = default_resource_class
def run(self, context):
try:
_validate_instances(self.instances)
except Exception as exc:
LOG.error('Failed to validate provided instances. %s', exc)
return actions.Result(error=six.text_type(exc))
provisioner = _provisioner(context)
# TODO(dtantsur): looping over instances is not very optimal, change it
# to metalsmith plan deployment API when it's available.
result = []
nodes = []
try:
for instance in self.instances:
LOG.debug('Trying to reserve a node for instance %s', instance)
if instance.get('name'):
# NOTE(dtantsur): metalsmith accepts list of nodes to pick
# from. We implement a simplest case when a user can pick a
# node by its name (actually, UUID will also work).
candidates = [instance['name']]
else:
candidates = None
if instance.get('profile'):
# TODO(dtantsur): change to traits?
instance.setdefault(
'capabilities', {})['profile'] = instance['profile']
node = provisioner.reserve_node(
resource_class=instance.get('resource_class') or
self.default_resource_class,
capabilities=instance.get('capabilities'),
candidates=candidates,
traits=instance.get('traits'))
LOG.info('Reserved node %s for instance %s', node, instance)
nodes.append(node)
try:
node_id = node.id
except AttributeError:
# TODO(dtantsur): transition from ironicclient to
# openstacksdk, remove when metalsmith is bumped to 0.9.0
node_id = node.uuid
result.append({'node': node_id, 'instance': instance})
except Exception as exc:
LOG.exception('Provisioning failed, cleaning up')
# Remove all reservations on failure
try:
_release_nodes(provisioner, nodes)
except Exception:
LOG.exception('Clean up failed, some nodes may still be '
'reserved by failed instances')
return actions.Result(
error="%s: %s" % (type(exc).__name__, exc)
)
return {'reservations': result}
class DeployNodeAction(base.TripleOAction):
"""Provision instance on a previously reserved node."""
def __init__(self, instance, node, ssh_keys=None,
# For compatibility with deployment based on heat+nova
ssh_user_name='heat-admin',
default_image='overcloud-full',
default_network='ctlplane',
# 50 is the default for old flavors, subtracting 1G to account
# for partitioning and configdrive.
default_root_size=49):
super(DeployNodeAction, self).__init__()
self.instance = instance
self.node = node
self.config = metalsmith.InstanceConfig(ssh_keys=ssh_keys)
self.config.add_user(ssh_user_name, admin=True, sudo=True)
self.default_image = default_image
self.default_network = default_network
self.default_root_size = default_root_size
def _get_image(self):
# TODO(dtantsur): move this logic to metalsmith in 0.9.0
image = self.instance.get('image', self.default_image)
image_type = _link_type(image)
if image_type == 'glance':
return sources.GlanceImage(image)
else:
checksum = self.instance.get('image_checksum')
if (checksum and image_type == 'http' and
_link_type(checksum) == 'http'):
kwargs = {'checksum_url': checksum}
else:
kwargs = {'checksum': checksum}
whole_disk_image = not (self.instance.get('image_kernel') or
self.instance.get('image_ramdisk'))
if whole_disk_image:
if image_type == 'http':
return sources.HttpWholeDiskImage(image, **kwargs)
else:
return sources.FileWholeDiskImage(image, **kwargs)
else:
if image_type == 'http':
return sources.HttpPartitionImage(
image,
kernel_url=self.instance.get('image_kernel'),
ramdisk_url=self.instance.get('image_ramdisk'),
**kwargs)
else:
return sources.FilePartitionImage(
image,
kernel_location=self.instance.get('image_kernel'),
ramdisk_location=self.instance.get('image_ramdisk'),
**kwargs)
def run(self, context):
try:
_validate_instances([self.instance])
except Exception as exc:
LOG.error('Failed to validate the request. %s', exc)
return actions.Result(error=six.text_type(exc))
provisioner = _provisioner(context)
LOG.debug('Starting provisioning of %s on node %s',
self.instance, self.node)
try:
instance = provisioner.provision_node(
self.node,
config=self.config,
hostname=self.instance['hostname'],
image=self._get_image(),
nics=self.instance.get('nics',
[{'network': self.default_network}]),
root_size_gb=self.instance.get('root_size_gb',
self.default_root_size),
swap_size_mb=self.instance.get('swap_size_mb'),
)
except Exception as exc:
LOG.exception('Provisioning of %s on node %s failed',
self.instance, self.node)
try:
_release_nodes(provisioner, [self.node])
except Exception:
LOG.exception('Clean up failed, node %s may still be '
'reserved by the failed instance', self.node)
return actions.Result(
error="%s: %s" % (type(exc).__name__, exc)
)
LOG.info('Started provisioning of %s on node %s',
self.instance, self.node)
return instance.to_dict()
class WaitForDeploymentAction(base.TripleOAction):
"""Wait for the instance to be deployed."""
def __init__(self, instance, timeout=3600):
super(WaitForDeploymentAction, self).__init__()
self.instance = instance
self.timeout = timeout
def run(self, context):
provisioner = _provisioner(context)
LOG.debug('Waiting for instance %s to provision',
self.instance['hostname'])
instance = provisioner.wait_for_provisioning([self.instance['uuid']],
timeout=self.timeout)[0]
LOG.info('Successfully provisioned instance %s',
self.instance['hostname'])
return instance.to_dict()
class UndeployInstanceAction(base.TripleOAction):
"""Undeploy a previously deployed instance."""
def __init__(self, instance, timeout=1800):
super(UndeployInstanceAction, self).__init__()
self.instance = instance
self.timeout = timeout
def run(self, context):
provisioner = _provisioner(context)
try:
instance = provisioner.show_instance(self.instance)
except Exception:
LOG.warning('Cannot get instance %s, assuming already deleted',
self.instance)
return
LOG.debug('Unprovisioning instance %s', instance.hostname)
provisioner.unprovision_node(instance.node, wait=self.timeout)
LOG.info('Successfully unprovisioned %s', instance.hostname)
def _validate_instances(instances):
for inst in instances:
if inst.get('name') and not inst.get('hostname'):
inst['hostname'] = inst['name']
jsonschema.validate(instances, _INSTANCES_INPUT_SCHEMA)
def _release_nodes(provisioner, nodes):
for node in nodes:
LOG.debug('Removing reservation from node %s', node)
try:
provisioner.unprovision_node(node)
except Exception:
LOG.exception('Unable to release node %s, moving on', node)
else:
LOG.info('Removed reservation from node %s', node)
def _link_type(image):
if image.startswith('http://') or image.startswith('https://'):
return 'http'
elif image.startswith('file://'):
return 'file'
else:
return 'glance'
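
A small sketch of how the image handling above behaves; the image references
are made-up examples and the import assumes tripleo-common is installed.

from tripleo_common.actions import baremetal_deploy

# _link_type decides which metalsmith source _get_image builds: 'glance' names
# become sources.GlanceImage, http(s) URLs become Http*Image and file:// URLs
# become File*Image, with partition vs whole-disk chosen by the presence of
# image_kernel/image_ramdisk.
assert baremetal_deploy._link_type('overcloud-full') == 'glance'
assert baremetal_deploy._link_type('https://example/image') == 'http'
assert baremetal_deploy._link_type('file:///var/lib/ironic/image') == 'file'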


@@ -0,0 +1,342 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from metalsmith import sources
import mock
from tripleo_common.actions import baremetal_deploy
from tripleo_common.tests import base
@mock.patch.object(baremetal_deploy, '_provisioner', autospec=True)
class TestReserveNodes(base.TestCase):
def test_success(self, mock_pr):
instances = [
{'hostname': 'host1', 'profile': 'compute'},
{'hostname': 'host2', 'resource_class': 'compute',
'capabilities': {'answer': '42'}},
{'name': 'control-0', 'traits': ['CUSTOM_GPU']},
]
action = baremetal_deploy.ReserveNodesAction(instances)
result = action.run(mock.Mock())
self.assertEqual(
[{'node': mock_pr.return_value.reserve_node.return_value.id,
'instance': req} for req in instances],
result['reservations'])
mock_pr.return_value.reserve_node.assert_has_calls([
mock.call(resource_class='baremetal', traits=None,
capabilities={'profile': 'compute'}, candidates=None),
mock.call(resource_class='compute', traits=None,
capabilities={'answer': '42'}, candidates=None),
mock.call(resource_class='baremetal', traits=['CUSTOM_GPU'],
capabilities=None, candidates=['control-0']),
])
self.assertFalse(mock_pr.return_value.unprovision_node.called)
def test_missing_hostname(self, mock_pr):
instances = [
{'hostname': 'host1'},
{'resource_class': 'compute', 'capabilities': {'answer': '42'}}
]
action = baremetal_deploy.ReserveNodesAction(instances)
result = action.run(mock.Mock())
self.assertIn("'hostname' is a required property", result.error)
self.assertFalse(mock_pr.return_value.reserve_node.called)
self.assertFalse(mock_pr.return_value.unprovision_node.called)
def test_failure(self, mock_pr):
instances = [
{'hostname': 'host1'},
{'hostname': 'host2', 'resource_class': 'compute',
'capabilities': {'answer': '42'}},
{'hostname': 'host3'},
]
success_node = mock.Mock(uuid='uuid1')
mock_pr.return_value.reserve_node.side_effect = [
success_node,
RuntimeError("boom"),
]
action = baremetal_deploy.ReserveNodesAction(instances)
result = action.run(mock.Mock())
self.assertIn('RuntimeError: boom', result.error)
mock_pr.return_value.reserve_node.assert_has_calls([
mock.call(resource_class='baremetal', capabilities=None,
candidates=None, traits=None),
mock.call(resource_class='compute', capabilities={'answer': '42'},
candidates=None, traits=None)
])
mock_pr.return_value.unprovision_node.assert_called_once_with(
success_node)
@mock.patch.object(baremetal_deploy, '_provisioner', autospec=True)
class TestDeployNode(base.TestCase):
def test_success_defaults(self, mock_pr):
action = baremetal_deploy.DeployNodeAction(
instance={'hostname': 'host1'},
node='1234'
)
result = action.run(mock.Mock())
pr = mock_pr.return_value
self.assertEqual(
pr.provision_node.return_value.to_dict.return_value,
result)
pr.provision_node.assert_called_once_with(
'1234',
image=mock.ANY,
nics=[{'network': 'ctlplane'}],
hostname='host1',
root_size_gb=49,
swap_size_mb=None,
config=mock.ANY,
)
config = pr.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
self.assertEqual('heat-admin', config.users[0]['name'])
source = pr.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.GlanceImage)
# TODO(dtantsur): check the image when it's a public field
def test_success_with_name(self, mock_pr):
action = baremetal_deploy.DeployNodeAction(
instance={'name': 'host1'},
node='1234'
)
result = action.run(mock.Mock())
pr = mock_pr.return_value
self.assertEqual(
pr.provision_node.return_value.to_dict.return_value,
result)
pr.provision_node.assert_called_once_with(
'1234',
image=mock.ANY,
nics=[{'network': 'ctlplane'}],
hostname='host1',
root_size_gb=49,
swap_size_mb=None,
config=mock.ANY,
)
config = pr.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
self.assertEqual('heat-admin', config.users[0]['name'])
def test_success(self, mock_pr):
pr = mock_pr.return_value
action = baremetal_deploy.DeployNodeAction(
instance={'hostname': 'host1',
'image': 'overcloud-alt',
'nics': [{'port': 'abcd'}],
'root_size_gb': 100,
'swap_size_mb': 4096},
node='1234',
ssh_keys=['ssh key contents'],
ssh_user_name='admin',
)
result = action.run(mock.Mock())
self.assertEqual(
pr.provision_node.return_value.to_dict.return_value,
result)
pr.provision_node.assert_called_once_with(
'1234',
image=mock.ANY,
nics=[{'port': 'abcd'}],
hostname='host1',
root_size_gb=100,
swap_size_mb=4096,
config=mock.ANY,
)
config = pr.provision_node.call_args[1]['config']
self.assertEqual(['ssh key contents'], config.ssh_keys)
self.assertEqual('admin', config.users[0]['name'])
source = pr.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.GlanceImage)
# TODO(dtantsur): check the image when it's a public field
# NOTE(dtantsur): limited coverage for source detection since this code is
# being moved to metalsmith in 0.9.0.
def test_success_http_partition_image(self, mock_pr):
action = baremetal_deploy.DeployNodeAction(
instance={'hostname': 'host1',
'image': 'https://example/image',
'image_kernel': 'https://example/kernel',
'image_ramdisk': 'https://example/ramdisk',
'image_checksum': 'https://example/checksum'},
node='1234'
)
result = action.run(mock.Mock())
pr = mock_pr.return_value
self.assertEqual(
pr.provision_node.return_value.to_dict.return_value,
result)
pr.provision_node.assert_called_once_with(
'1234',
image=mock.ANY,
nics=[{'network': 'ctlplane'}],
hostname='host1',
root_size_gb=49,
swap_size_mb=None,
config=mock.ANY,
)
config = pr.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
self.assertEqual('heat-admin', config.users[0]['name'])
source = pr.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.HttpPartitionImage)
self.assertEqual('https://example/image', source.url)
self.assertEqual('https://example/kernel', source.kernel_url)
self.assertEqual('https://example/ramdisk', source.ramdisk_url)
self.assertEqual('https://example/checksum', source.checksum_url)
def test_success_file_partition_image(self, mock_pr):
action = baremetal_deploy.DeployNodeAction(
instance={'hostname': 'host1',
'image': 'file:///var/lib/ironic/image',
'image_kernel': 'file:///var/lib/ironic/kernel',
'image_ramdisk': 'file:///var/lib/ironic/ramdisk',
'image_checksum': 'abcd'},
node='1234'
)
result = action.run(mock.Mock())
pr = mock_pr.return_value
self.assertEqual(
pr.provision_node.return_value.to_dict.return_value,
result)
pr.provision_node.assert_called_once_with(
'1234',
image=mock.ANY,
nics=[{'network': 'ctlplane'}],
hostname='host1',
root_size_gb=49,
swap_size_mb=None,
config=mock.ANY,
)
config = pr.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
self.assertEqual('heat-admin', config.users[0]['name'])
source = pr.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.FilePartitionImage)
self.assertEqual('file:///var/lib/ironic/image', source.location)
self.assertEqual('file:///var/lib/ironic/kernel',
source.kernel_location)
self.assertEqual('file:///var/lib/ironic/ramdisk',
source.ramdisk_location)
self.assertEqual('abcd', source.checksum)
def test_failure(self, mock_pr):
pr = mock_pr.return_value
action = baremetal_deploy.DeployNodeAction(
instance={'hostname': 'host1'},
node='1234'
)
pr.provision_node.side_effect = RuntimeError('boom')
result = action.run(mock.Mock())
self.assertIn('RuntimeError: boom', result.error)
pr.provision_node.assert_called_once_with(
'1234',
image=mock.ANY,
nics=[{'network': 'ctlplane'}],
hostname='host1',
root_size_gb=49,
swap_size_mb=None,
config=mock.ANY,
)
pr.unprovision_node.assert_called_once_with('1234')
@mock.patch.object(baremetal_deploy, '_provisioner', autospec=True)
class TestCheckExistingInstances(base.TestCase):
def test_success(self, mock_pr):
pr = mock_pr.return_value
instances = [
{'hostname': 'host1'},
{'hostname': 'host2', 'resource_class': 'compute',
'capabilities': {'answer': '42'}}
]
existing = mock.MagicMock()
pr.show_instance.side_effect = [
RuntimeError('not found'),
existing,
]
action = baremetal_deploy.CheckExistingInstancesAction(instances)
result = action.run(mock.Mock())
self.assertEqual({
'instances': [existing.to_dict.return_value],
'not_found': [{'hostname': 'host1'}]
}, result)
pr.show_instance.assert_has_calls([
mock.call('host1'), mock.call('host2')
])
def test_missing_hostname(self, mock_pr):
instances = [
{'hostname': 'host1'},
{'resource_class': 'compute', 'capabilities': {'answer': '42'}}
]
action = baremetal_deploy.CheckExistingInstancesAction(instances)
result = action.run(mock.Mock())
self.assertIn("'hostname' is a required property", result.error)
self.assertFalse(mock_pr.return_value.show_instance.called)
@mock.patch.object(baremetal_deploy, '_provisioner', autospec=True)
class TestWaitForDeployment(base.TestCase):
def test_success(self, mock_pr):
pr = mock_pr.return_value
action = baremetal_deploy.WaitForDeploymentAction(
{'hostname': 'compute.cloud', 'uuid': 'uuid1'})
result = action.run(mock.Mock())
pr.wait_for_provisioning.assert_called_once_with(['uuid1'],
timeout=3600)
inst = pr.wait_for_provisioning.return_value[0]
self.assertIs(result, inst.to_dict.return_value)
@mock.patch.object(baremetal_deploy, '_provisioner', autospec=True)
class TestUndeployInstance(base.TestCase):
def test_success(self, mock_pr):
pr = mock_pr.return_value
action = baremetal_deploy.UndeployInstanceAction('inst1')
result = action.run(mock.Mock())
self.assertIsNone(result)
pr.show_instance.assert_called_once_with('inst1')
pr.unprovision_node.assert_called_once_with(
pr.show_instance.return_value.node, wait=1800)
def test_not_found(self, mock_pr):
pr = mock_pr.return_value
pr.show_instance.side_effect = RuntimeError('not found')
action = baremetal_deploy.UndeployInstanceAction('inst1')
result = action.run(mock.Mock())
self.assertIsNone(result)
pr.show_instance.assert_called_once_with('inst1')
self.assertFalse(pr.unprovision_node.called)


@@ -16,9 +16,10 @@
import six
from keystoneauth1.identity.generic import Token as IdentityToken
from keystoneauth1 import loading
from keystoneauth1 import session as ks_session
from keystoneauth1.token_endpoint import Token as SimpleToken
from keystoneclient import service_catalog as ks_service_catalog
from keystoneclient.v3 import client as ks_client
from keystoneclient.v3 import endpoints as ks_endpoints
@@ -90,10 +91,10 @@ def get_session_and_auth(context, **kwargs):
}
)
auth = SimpleToken(endpoint=endpoint, token=context.auth_token)
auth_uri = context.auth_uri or CONF.keystone_authtoken.auth_uri
ks_auth = SimpleToken(
endpoint=auth_uri,
token=context.auth_token
)
@@ -108,6 +109,39 @@ def get_session_and_auth(context, **kwargs):
}
# NOTE(dtantsur): get_session_and_auth returns a session tied to a specific
# service. This function returns a generic session. Eventually we should switch
# everything to using it and service-specific Adapter on top.
def get_session(context):
"""Get a generic session suitable for any service(s).
:param context: action context
:return: keystone `Session`
"""
try:
context = context.security
except AttributeError:
pass
auth_uri = context.auth_uri or CONF.keystone_authtoken.auth_uri
try:
# TODO(dtantsur): a better way to detect the project domain?
project_domain = context.service_catalog['project']['domain']['name']
except KeyError:
project_domain = CONF.keystone_authtoken.project_domain_name
ks_auth = IdentityToken(auth_uri, token=context.auth_token,
# NOTE(dtantsur): project scope is required for V3
project_name=context.project_name,
project_domain_name=project_domain)
sess = ks_session.Session(
auth=ks_auth,
verify=_determine_verify(context)
)
return sess
def _admin_client(trust_id=None):
if CONF.keystone_authtoken.auth_type is None:
auth_url = CONF.keystone_authtoken.auth_uri
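
The NOTE above suggests pairing the generic session with a service-specific
Adapter; a minimal sketch of that pattern follows, with the service_type and
interface values chosen only for illustration.

from keystoneauth1 import adapter

from tripleo_common.utils import keystone


def baremetal_adapter(context):
    # Generic session from this change, scoped to Ironic via an Adapter.
    sess = keystone.get_session(context)
    return adapter.Adapter(session=sess, service_type='baremetal',
                           interface='internal')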


@@ -0,0 +1,190 @@
---
version: '2.0'
name: tripleo.baremetal_deploy.v1
description: TripleO Baremetal Deployment Workflows
workflows:
_deploy_one:
description: Internal workflow to deploy one node
input:
- instance
- node
- ssh_keys: []
- ssh_user_name: heat-admin
- timeout: 3600
- queue_name: tripleo
tags:
- tripleo-common-managed
tasks:
deploy_node:
action: tripleo.baremetal_deploy.deploy_node
input:
instance: <% $.instance %>
node: <% $.node %>
ssh_keys: <% $.ssh_keys %>
ssh_user_name: <% $.ssh_user_name %>
publish:
instance: <% task().result %>
publish-on-error:
status: FAILED
message: <% task().result %>
on-success: wait_for_deploy
on-error: send_message
wait_for_deploy:
action: tripleo.baremetal_deploy.wait_for_deploy
input:
instance: <% $.instance %>
timeout: <% $.timeout %>
publish:
instance: <% task().result %>
message: Instance <% task().result.hostname %> deployed successfully
publish-on-error:
status: FAILED
message: <% task().result %>
on-complete: send_message
send_message:
workflow: tripleo.messaging.v1.send
input:
queue_name: <% $.queue_name %>
type: <% execution().name %>
status: <% $.get('status', 'SUCCESS') %>
message: <% $.get('message', '') %>
execution: <% execution() %>
instance: <% $.instance %>
on-success:
- fail: <% $.get('status', 'SUCCESS') != 'SUCCESS' %>
output:
instance: <% $.instance %>
output-on-error:
result: <% $.get('message', 'Deployment failed') %>
deploy_instances:
description: Deploy instances on bare metal nodes.
input:
- instances
- ssh_keys: []
- ssh_user_name: heat-admin
- timeout: 3600
- concurrency: 20
- queue_name: tripleo
tags:
- tripleo-common-managed
tasks:
find_existing_instances:
action: tripleo.baremetal_deploy.check_existing_instances
input:
instances: <% $.instances %>
publish:
instances: <% task().result.not_found %>
existing_instances: <% task().result.instances %>
publish-on-error:
status: FAILED
message: <% task().result %>
on-success: reserve_nodes
on-error: send_message
reserve_nodes:
action: tripleo.baremetal_deploy.reserve_nodes
input:
instances: <% $.instances %>
publish:
reservations: <% task().result.reservations %>
publish-on-error:
status: FAILED
message: <% task().result %>
on-success: deploy_nodes
on-error: send_message
deploy_nodes:
with-items: reservation in <% $.reservations %>
concurrency: <% $.concurrency %>
workflow: _deploy_one
input:
instance: <% $.reservation.instance %>
node: <% $.reservation.node %>
ssh_keys: <% $.ssh_keys %>
ssh_user_name: <% $.ssh_user_name %>
timeout: <% $.timeout %>
queue_name: <% $.queue_name %>
publish:
all_instances: <% task().result.instance + $.existing_instances %>
new_instances: <% task().result.instance %>
publish-on-error:
status: FAILED
message: <% task().result %>
on-success: publish_result
on-error: send_message
publish_result:
publish:
ctlplane_ips: <% $.all_instances.toDict($.hostname, $.ip_addresses.ctlplane[0]) %>
instances: <% $.all_instances.toDict($.hostname, $) %>
on-complete: send_message
send_message:
workflow: tripleo.messaging.v1.send
input:
queue_name: <% $.queue_name %>
type: <% execution().name %>
status: <% $.get('status', 'SUCCESS') %>
message: <% $.get('message', '') %>
execution: <% execution() %>
payload:
ctlplane_ips: <% $.get('ctlplane_ips', {}) %>
instances: <% $.get('instances', {}) %>
output:
ctlplane_ips: <% $.ctlplane_ips %>
existing_instances: <% $.existing_instances.toDict($.hostname, $) %>
instances: <% $.instances %>
new_instances: <% $.new_instances.toDict($.hostname, $) %>
undeploy_instances:
description: Undeploy previously deployed instances
input:
- instances
- timeout: 3600
- concurrency: 20
- queue_name: tripleo
tags:
- tripleo-common-managed
tasks:
undeploy_instances:
with-items: instance in <% $.instances %>
concurrency: <% $.concurrency %>
action: tripleo.baremetal_deploy.undeploy_instance
input:
instance: <% $.instance %>
timeout: <% $.timeout %>
publish-on-error:
status: FAILED
message: <% task().result %>
on-complete: send_message
send_message:
workflow: tripleo.messaging.v1.send
input:
queue_name: <% $.queue_name %>
type: <% execution().name %>
status: <% $.get('status', 'SUCCESS') %>
message: <% $.get('message', '') %>
execution: <% execution() %>
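
For reference, a sketch of input matching the deploy_instances parameters
declared above; the hostnames and SSH key are invented, and the CLI call in
the comment is only an assumed way to submit it.

import json

workflow_input = {
    'instances': [
        {'hostname': 'compute-0', 'resource_class': 'compute'},
        {'hostname': 'control-0', 'traits': ['CUSTOM_GPU']},
    ],
    'ssh_keys': ['ssh-rsa AAAA... example@undercloud'],
    'ssh_user_name': 'heat-admin',
    'timeout': 3600,
    'concurrency': 20,
}

# e.g. openstack workflow execution create \
#          tripleo.baremetal_deploy.v1.deploy_instances input.json
with open('input.json', 'w') as f:
    json.dump(workflow_input, f)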