Allow assigning profiles based on XXX_profile capabilities
This change will allow users (or ironic-inspector) to provide several possible profiles for a node by setting capabilities like XXX_profile (where XXX = compute, controller...). Two new commands are added: openstack overcloud profiles match — when not enough nodes with a given profile are found, this command will inspect nodes with such capabilities and choose the missing nodes from them. openstack overcloud profiles list — lists all available and active nodes with their profiles and possible profiles. See the following thread for the full background: http://lists.openstack.org/pipermail/openstack-dev/2015-November/078884.html This change refactors the profile validation code in the deploy command to use the same logic as the commands above. It's worth noting that this change also removes an incorrect assumption that a node can have multiple values for the same capability. It also makes sure we only take active and available nodes into account for all calculations. Change-Id: I398cf2052b280eaf67e5755412c35fe9551c341f
This commit is contained in:
parent
376af39339
commit
2e680eafa9
@ -67,5 +67,7 @@ openstack.tripleoclient.v1 =
|
||||
overcloud_image_build = tripleoclient.v1.overcloud_image:BuildOvercloudImage
|
||||
overcloud_image_upload = tripleoclient.v1.overcloud_image:UploadOvercloudImage
|
||||
overcloud_node_delete = tripleoclient.v1.overcloud_node:DeleteNode
|
||||
overcloud_profiles_match = tripleoclient.v1.overcloud_profiles:MatchProfiles
|
||||
overcloud_profiles_list = tripleoclient.v1.overcloud_profiles:ListProfiles
|
||||
overcloud_update_stack = tripleoclient.v1.overcloud_update:UpdateOvercloud
|
||||
undercloud_install = tripleoclient.v1.undercloud:InstallPlugin
|
||||
|
@ -56,3 +56,7 @@ class IntrospectionError(RuntimeError):
|
||||
|
||||
class StateTransitionFailed(Exception):
|
||||
"""Ironic node state transition failed"""
|
||||
|
||||
|
||||
class ProfileMatchingError(Exception):
|
||||
"""Failed to validate or assign node profiles"""
|
||||
|
@ -13,6 +13,8 @@
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from uuid import uuid4
|
||||
|
||||
import mock
|
||||
import os.path
|
||||
import tempfile
|
||||
@ -521,3 +523,190 @@ class TestCreateCephxKey(TestCase):
|
||||
def test_create_cephx_key(self):
|
||||
key = utils.create_cephx_key()
|
||||
self.assertEqual(len(key), 40)
|
||||
|
||||
|
||||
class TestNodeGetCapabilities(TestCase):
    """Tests for utils.node_get_capabilities."""

    def test_with_capabilities(self):
        # A "k1:v1,k2:v2" capabilities string is parsed into a dict.
        node = mock.Mock(properties={'capabilities': 'x:y,foo:bar'})
        self.assertEqual({'x': 'y', 'foo': 'bar'},
                         utils.node_get_capabilities(node))

    def test_no_capabilities(self):
        # A node without the 'capabilities' property yields an empty dict.
        node = mock.Mock(properties={})
        self.assertEqual({}, utils.node_get_capabilities(node))
|
||||
|
||||
|
||||
class TestNodeAddCapabilities(TestCase):
    """Tests for utils.node_add_capabilities."""

    def test_add(self):
        bm_client = mock.Mock()
        node = mock.Mock(uuid='uuid1', properties={})
        new_caps = utils.node_add_capabilities(bm_client, node, x='y')
        # The merged capabilities must be patched into Ironic ...
        bm_client.node.update.assert_called_once_with(
            'uuid1', [{'op': 'add', 'path': '/properties/capabilities',
                       'value': 'x:y'}])
        # ... mirrored on the local node object ...
        self.assertEqual('x:y', node.properties['capabilities'])
        # ... and returned as a dict.
        self.assertEqual({'x': 'y'}, new_caps)
|
||||
|
||||
|
||||
class FakeFlavor(object):
    """Minimal stand-in for a Nova flavor used by the profile tests.

    The profile capability defaults to the flavor name, and boot_option
    is always 'local'.
    """

    def __init__(self, name):
        self.name = name
        self.profile = name

    def get_keys(self):
        """Mimic novaclient's Flavor.get_keys() extra-specs accessor."""
        keys = {'capabilities:boot_option': 'local'}
        keys['capabilities:profile'] = self.profile
        return keys
|
||||
|
||||
|
||||
class TestAssignVerifyProfiles(TestCase):
    """Tests for utils.assign_and_verify_profiles.

    Each test builds a list of fake Ironic nodes, runs the helper and
    asserts on the returned (errors, warnings) counts plus any node
    updates issued to the baremetal client.
    """

    def setUp(self):
        super(TestAssignVerifyProfiles, self).setUp()
        # Strict mock: only node.list and node.update may be called.
        self.bm_client = mock.Mock(spec=['node'],
                                   node=mock.Mock(spec=['list', 'update']))
        self.nodes = []
        self.bm_client.node.list.return_value = self.nodes
        # Default deployment plan: one compute and one control node.
        self.flavors = {name: (FakeFlavor(name), 1)
                        for name in ('compute', 'control')}

    def _get_fake_node(self, profile=None, possible_profiles=[],
                       provision_state='available'):
        # NOTE(review): mutable default argument is safe here because
        # possible_profiles is only iterated, never mutated.
        caps = {'%s_profile' % p: '1'
                for p in possible_profiles}
        if profile is not None:
            caps['profile'] = profile
        caps = utils.dict_to_capabilities(caps)
        return mock.Mock(uuid=str(uuid4()),
                         properties={'capabilities': caps},
                         provision_state=provision_state,
                         spec=['uuid', 'properties', 'provision_state'])

    def _test(self, expected_errors, expected_warnings,
              assign_profiles=True, dry_run=False):
        # Run the helper and check the error/warning counters it returns.
        errors, warnings = utils.assign_and_verify_profiles(self.bm_client,
                                                            self.flavors,
                                                            assign_profiles,
                                                            dry_run)
        self.assertEqual(errors, expected_errors)
        self.assertEqual(warnings, expected_warnings)

    def test_no_matching_without_scale(self):
        # Flavors with scale 0 are skipped entirely, so mismatched
        # profiles produce neither errors nor warnings.
        self.flavors = {name: (object(), 0)
                        for name in self.flavors}
        self.nodes[:] = [self._get_fake_node(profile='fake'),
                         self._get_fake_node(profile='fake')]

        self._test(0, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_exact_match(self):
        self.nodes[:] = [self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='control')]

        self._test(0, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_nodes_with_no_profiles_present(self):
        # Leftover nodes without a usable profile trigger one warning.
        self.nodes[:] = [self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile=None),
                         self._get_fake_node(profile='foobar'),
                         self._get_fake_node(profile='control')]

        self._test(0, 1)
        self.assertFalse(self.bm_client.node.update.called)

    def test_more_nodes_with_profiles_present(self):
        # More tagged nodes than the requested scale -> one warning.
        self.nodes[:] = [self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='compute'),
                         self._get_fake_node(profile='control')]

        self._test(0, 1)
        self.assertFalse(self.bm_client.node.update.called)

    def test_no_nodes(self):
        # One error per each flavor
        self._test(2, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_not_enough_nodes(self):
        self.nodes[:] = [self._get_fake_node(profile='compute')]
        self._test(1, 0)
        self.assertFalse(self.bm_client.node.update.called)

    def test_assign_profiles(self):
        # Nodes only carry XXX_profile hints; the helper must pick and
        # tag one node per flavor.
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['control']),
                         self._get_fake_node(possible_profiles=['compute'])]

        # one warning for a redundant node
        self._test(0, 1, assign_profiles=True)
        self.assertEqual(2, self.bm_client.node.update.call_count)

        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        actual_profiles.sort(key=lambda x: str(x))
        self.assertEqual([None, 'compute', 'control'], actual_profiles)

    def test_assign_profiles_multiple_options(self):
        # Both nodes are eligible for both flavors; each flavor must
        # still end up with exactly one node.
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute',
                                                                'control']),
                         self._get_fake_node(possible_profiles=['compute',
                                                                'control'])]

        self._test(0, 0, assign_profiles=True)
        self.assertEqual(2, self.bm_client.node.update.call_count)

        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        actual_profiles.sort(key=lambda x: str(x))
        self.assertEqual(['compute', 'control'], actual_profiles)

    def test_assign_profiles_not_enough(self):
        # Only 'compute' candidates exist: 'control' fails (error) and
        # the spare compute candidate is left untagged (warning).
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['compute'])]

        self._test(1, 1, assign_profiles=True)
        # no node update for failed flavor
        self.assertEqual(1, self.bm_client.node.update.call_count)

        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        actual_profiles.sort(key=lambda x: str(x))
        self.assertEqual([None, None, 'compute'], actual_profiles)

    def test_assign_profiles_dry_run(self):
        # dry_run must leave every node unmodified.
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute']),
                         self._get_fake_node(possible_profiles=['control']),
                         self._get_fake_node(possible_profiles=['compute'])]

        self._test(0, 1, dry_run=True)
        self.assertFalse(self.bm_client.node.update.called)

        actual_profiles = [utils.node_get_capabilities(node).get('profile')
                           for node in self.nodes]
        self.assertEqual([None] * 3, actual_profiles)

    def test_scale(self):
        # active nodes with assigned profiles are fine
        self.nodes[:] = [self._get_fake_node(profile='compute',
                                             provision_state='active'),
                         self._get_fake_node(profile='control')]

        self._test(0, 0, assign_profiles=True)
        self.assertFalse(self.bm_client.node.update.called)

    def test_assign_profiles_wrong_state(self):
        # active nodes are not considered for assigning profiles
        self.nodes[:] = [self._get_fake_node(possible_profiles=['compute'],
                                             provision_state='active'),
                         self._get_fake_node(possible_profiles=['control'],
                                             provision_state='cleaning'),
                         self._get_fake_node(profile='compute',
                                             provision_state='error')]

        self._test(2, 1, assign_profiles=True)
        self.assertFalse(self.bm_client.node.update.called)
|
||||
|
@ -118,6 +118,9 @@ class TestDeployValidators(fakes.TestDeployOvercloud):
|
||||
self.uuid = uuid4()
|
||||
self.name = name
|
||||
|
||||
def get_keys(self):
|
||||
return {'capabilities:boot_option': 'local'}
|
||||
|
||||
arglist = [
|
||||
'--block-storage-flavor', 'block',
|
||||
'--block-storage-scale', '3',
|
||||
@ -136,49 +139,34 @@ class TestDeployValidators(fakes.TestDeployOvercloud):
|
||||
]
|
||||
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
|
||||
|
||||
expected_result = {
|
||||
'block': (FakeFlavor('block'), 3),
|
||||
'compute': (FakeFlavor('compute'), 3),
|
||||
'control': (FakeFlavor('control'), 1),
|
||||
'swift': (FakeFlavor('swift'), 2)
|
||||
}
|
||||
mock_flavor_list = mock.Mock(
|
||||
return_value=[
|
||||
FakeFlavor('block'),
|
||||
FakeFlavor('compute'),
|
||||
FakeFlavor('control'),
|
||||
FakeFlavor('swift'),
|
||||
flavor for flavor, scale in expected_result.values()
|
||||
]
|
||||
)
|
||||
mock_flavors = mock.Mock()
|
||||
mock_flavors.attach_mock(mock_flavor_list, 'list')
|
||||
self.app.client_manager.compute.attach_mock(mock_flavors, 'flavors')
|
||||
|
||||
self.cmd._check_flavors_exist(parsed_args)
|
||||
result = self.cmd._collect_flavors(parsed_args)
|
||||
self.assertEqual(self.cmd.predeploy_errors, 0)
|
||||
self.assertEqual(self.cmd.predeploy_warnings, 0)
|
||||
self.assertEqual(expected_result, result)
|
||||
|
||||
del expected_result['swift']
|
||||
mock_flavor_list_no_swift = mock.Mock(
|
||||
return_value=[
|
||||
FakeFlavor('block'),
|
||||
FakeFlavor('compute'),
|
||||
FakeFlavor('control'),
|
||||
flavor for flavor, scale in expected_result.values()
|
||||
]
|
||||
)
|
||||
mock_flavors.attach_mock(mock_flavor_list_no_swift, 'list')
|
||||
self.cmd._check_flavors_exist(parsed_args)
|
||||
self.assertEqual(self.cmd.predeploy_errors, 1)
|
||||
self.assertEqual(self.cmd.predeploy_warnings, 0)
|
||||
|
||||
def test_check_profiles(self):
|
||||
flavor_profile_map = {'ceph-flavor': 'ceph-profile'}
|
||||
node_profile_map = {
|
||||
None: ['e0e6a290-2321-4981-8a76-b230284119c2'],
|
||||
'ceph-profile': ['ea7d8a81-5e7c-4696-bd1e-8ee83da5b816']
|
||||
}
|
||||
|
||||
self.cmd._check_profiles('ceph-storage', 'ceph-flavor', 1,
|
||||
flavor_profile_map,
|
||||
node_profile_map)
|
||||
self.assertEqual(self.cmd.predeploy_errors, 0)
|
||||
self.assertEqual(self.cmd.predeploy_warnings, 0)
|
||||
|
||||
self.cmd._check_profiles('ceph-storage', 'ceph-flavor', 2,
|
||||
flavor_profile_map,
|
||||
node_profile_map)
|
||||
result = self.cmd._collect_flavors(parsed_args)
|
||||
self.assertEqual(self.cmd.predeploy_errors, 1)
|
||||
self.assertEqual(self.cmd.predeploy_warnings, 0)
|
||||
self.assertEqual(expected_result, result)
|
||||
|
126
tripleoclient/tests/v1/test_overcloud_profiles.py
Normal file
126
tripleoclient/tests/v1/test_overcloud_profiles.py
Normal file
@ -0,0 +1,126 @@
|
||||
# Copyright 2015 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from tripleoclient import exceptions
|
||||
from tripleoclient.tests import test_utils
|
||||
from tripleoclient.tests.v1 import test_plugin
|
||||
from tripleoclient import utils
|
||||
from tripleoclient.v1 import overcloud_profiles
|
||||
|
||||
|
||||
@mock.patch.object(utils, 'assign_and_verify_profiles', autospec=True)
class TestMatchProfiles(test_plugin.TestPluginV1):
    """Tests for the 'openstack overcloud profiles match' command.

    assign_and_verify_profiles is mocked out class-wide, so each test
    method receives it as mock_assign.
    """

    def setUp(self):
        super(TestMatchProfiles, self).setUp()
        self.cmd = overcloud_profiles.MatchProfiles(self.app, None)
        self.app.client_manager.tripleoclient = mock.Mock()
        self.app.client_manager.compute = mock.Mock()
        self.flavors = [
            test_utils.FakeFlavor('compute'),
            test_utils.FakeFlavor('control'),
        ]
        self.app.client_manager.compute.flavors.list.return_value = (
            self.flavors)

    def test_ok(self, mock_assign):
        # No errors reported -> the command completes normally.
        mock_assign.return_value = (0, 0)

        arglist = [
            '--compute-flavor', 'compute',
            '--compute-scale', '3',
            '--control-flavor', 'control',
            '--control-scale', '1',
        ]
        parsed_args = self.check_parser(self.cmd, arglist, [])

        self.cmd.take_action(parsed_args)

        mock_assign.assert_called_once_with(
            self.app.client_manager.tripleoclient.baremetal,
            {'compute': (self.flavors[0], 3),
             'control': (self.flavors[1], 1)},
            assign_profiles=True, dry_run=False)

    def test_failed(self, mock_assign):
        # Any errors from profile matching abort the command.
        mock_assign.return_value = (2, 0)

        arglist = [
            '--compute-flavor', 'compute',
            '--compute-scale', '3',
            '--control-flavor', 'control',
            '--control-scale', '1',
        ]
        parsed_args = self.check_parser(self.cmd, arglist, [])

        self.assertRaises(exceptions.ProfileMatchingError,
                          self.cmd.take_action, parsed_args)

        mock_assign.assert_called_once_with(
            self.app.client_manager.tripleoclient.baremetal,
            {'compute': (self.flavors[0], 3),
             'control': (self.flavors[1], 1)},
            assign_profiles=True, dry_run=False)

    def test_dry_run(self, mock_assign):
        # --dry-run must be forwarded to assign_and_verify_profiles.
        mock_assign.return_value = (0, 0)

        arglist = [
            '--compute-flavor', 'compute',
            '--compute-scale', '3',
            '--control-flavor', 'control',
            '--control-scale', '1',
            '--dry-run'
        ]
        parsed_args = self.check_parser(self.cmd, arglist, [])

        self.cmd.take_action(parsed_args)

        mock_assign.assert_called_once_with(
            self.app.client_manager.tripleoclient.baremetal,
            {'compute': (self.flavors[0], 3),
             'control': (self.flavors[1], 1)},
            assign_profiles=True, dry_run=True)
|
||||
|
||||
|
||||
class TestListProfiles(test_plugin.TestPluginV1):
    """Tests for the 'openstack overcloud profiles list' command."""

    def setUp(self):
        super(TestListProfiles, self).setUp()
        self.cmd = overcloud_profiles.ListProfiles(self.app, None)
        self.app.client_manager.tripleoclient = mock.Mock()
        # Nodes covering the interesting states: active without
        # capabilities, enroll (filtered out), available with possible
        # profiles, and available with a falsy XXX_profile hint.
        self.nodes = [
            mock.Mock(uuid='uuid1', provision_state='active',
                      properties={}),
            mock.Mock(uuid='uuid2', provision_state='enroll',
                      properties={'capabilities': 'profile:compute'}),
            mock.Mock(uuid='uuid3', provision_state='available',
                      properties={'capabilities': 'profile:compute,'
                                  'compute_profile:1,control_profile:true'}),
            mock.Mock(uuid='uuid4', provision_state='available',
                      properties={'capabilities': 'profile:compute,'
                                  'compute_profile:0'}),
        ]
        self.bm_client = self.app.client_manager.tripleoclient.baremetal
        self.bm_client.node.list.return_value = self.nodes

    def test_list(self):
        # take_action returns (column headers, rows); the enroll node
        # (uuid2) must not appear in the output.
        result = self.cmd.take_action(None)
        self.assertEqual(5, len(result[0]))
        self.assertEqual(
            [('uuid1', self.nodes[0].name, 'active', None, ''),
             ('uuid3', self.nodes[2].name, 'available', 'compute',
              'compute, control'),
             ('uuid4', self.nodes[3].name, 'available', 'compute', '')],
            result[1])
|
@ -27,6 +27,7 @@ import time
|
||||
|
||||
from heatclient.common import event_utils
|
||||
from heatclient.exc import HTTPNotFound
|
||||
from openstackclient.i18n import _
|
||||
from six.moves import configparser
|
||||
from six.moves import urllib
|
||||
|
||||
@ -279,7 +280,7 @@ def wait_for_provision_state(baremetal_client, node_uuid, provision_state,
|
||||
:raises exceptions.StateTransitionFailed: if node.last_error is set
|
||||
"""
|
||||
|
||||
for _ in range(0, loops):
|
||||
for _l in range(0, loops):
|
||||
|
||||
node = baremetal_client.node.get(node_uuid)
|
||||
|
||||
@ -336,7 +337,7 @@ def wait_for_node_introspection(inspector_client, auth_token, inspector_url,
|
||||
log = logging.getLogger(__name__ + ".wait_for_node_introspection")
|
||||
node_uuids = node_uuids[:]
|
||||
|
||||
for _ in range(0, loops):
|
||||
for _l in range(0, loops):
|
||||
|
||||
for node_uuid in node_uuids:
|
||||
status = inspector_client.get_status(
|
||||
@ -605,3 +606,184 @@ def ensure_run_as_normal_user():
|
||||
raise exceptions.RootUserExecution(
|
||||
'This command cannot run under root user.'
|
||||
' Switch to a normal user.')
|
||||
|
||||
|
||||
def capabilities_to_dict(caps):
    """Parse an Ironic capabilities string into a dictionary.

    The input has the form "key1:value1,key2:value2". An empty or
    missing string yields an empty dict.
    """
    if not caps:
        return {}
    pairs = (entry.split(':', 1) for entry in caps.split(','))
    return {name: value for name, value in pairs}
|
||||
|
||||
|
||||
def dict_to_capabilities(caps_dict):
    """Serialize a dictionary into the Ironic capabilities string format.

    Entries whose value is None are dropped from the result.
    """
    parts = []
    for name, value in caps_dict.items():
        if value is None:
            continue
        parts.append("%s:%s" % (name, value))
    return ','.join(parts)
|
||||
|
||||
|
||||
def node_get_capabilities(node):
    """Return the node's capabilities parsed into a dictionary.

    Nodes without a 'capabilities' property yield an empty dict.
    """
    raw_caps = node.properties.get('capabilities')
    return capabilities_to_dict(raw_caps)
|
||||
|
||||
|
||||
def node_add_capabilities(bm_client, node, **updated):
    """Add or replace capabilities for a node.

    Merges *updated* into the node's existing capabilities, patches the
    result into Ironic and mirrors it on the local node object.

    :param bm_client: ironic client instance
    :param node: node object to update
    :returns: the merged capabilities as a dictionary
    """
    merged = node_get_capabilities(node)
    merged.update(updated)
    as_string = dict_to_capabilities(merged)
    # Keep the cached node object consistent with what we send to Ironic.
    node.properties['capabilities'] = as_string
    patch = [{'op': 'add',
              'path': '/properties/capabilities',
              'value': as_string}]
    bm_client.node.update(node.uuid, patch)
    return merged
|
||||
|
||||
|
||||
def assign_and_verify_profiles(bm_client, flavors,
                               assign_profiles=False, dry_run=False):
    """Assign and verify profiles for given flavors.

    :param bm_client: ironic client instance
    :param flavors: map flavor name -> (flavor object, required count)
    :param assign_profiles: whether to allow assigning profiles to nodes
    :param dry_run: whether to skip applying actual changes (only makes sense
                    if assign_profiles is True)
    :returns: tuple (errors count, warnings count)
    """
    log = logging.getLogger(__name__ + ".assign_and_verify_profiles")
    predeploy_errors = 0
    predeploy_warnings = 0

    # nodes available for deployment and scaling (including active)
    bm_nodes = {node.uuid: node
                for node in bm_client.node.list(maintenance=False,
                                                detail=True)
                if node.provision_state in ('available', 'active')}
    # create a pool of unprocessed nodes and record their capabilities;
    # nodes are popped from this pool once claimed by a flavor
    free_node_caps = {uu: node_get_capabilities(node)
                      for uu, node in bm_nodes.items()}

    # TODO(dtantsur): use command-line arguments to specify the order in
    # which profiles are processed (might matter for assigning profiles)
    for flavor_name, (flavor, scale) in flavors.items():
        if not scale:
            log.debug("Skipping verification of flavor %s because "
                      "none will be deployed", flavor_name)
            continue

        profile = flavor.get_keys().get('capabilities:profile')
        if not profile:
            predeploy_errors += 1
            log.error(
                'Error: The %s flavor has no profile associated', flavor_name)
            log.error(
                'Recommendation: assign a profile with openstack flavor '
                'set --property "capabilities:profile"="PROFILE_NAME" %s',
                flavor_name)
            continue

        # first collect nodes with known profiles
        assigned_nodes = [uu for uu, caps in free_node_caps.items()
                          if caps.get('profile') == profile]
        # required_count > 0 means we are short of nodes for this flavor
        required_count = scale - len(assigned_nodes)

        if required_count < 0:
            log.warning('%d nodes with profile %s won\'t be used '
                        'for deployment now', -required_count, profile)
            predeploy_warnings += 1
            required_count = 0
        elif required_count > 0 and assign_profiles:
            # find more nodes by checking XXX_profile capabilities that are
            # set by ironic-inspector or manually
            capability = '%s_profile' % profile
            more_nodes = [
                uu for uu, caps in free_node_caps.items()
                # use only nodes without a known profile
                if not caps.get('profile')
                and caps.get(capability, '').lower() in ('1', 'true')
                # do not assign profiles for active nodes
                and bm_nodes[uu].provision_state == 'available'
            ][:required_count]
            assigned_nodes.extend(more_nodes)
            required_count -= len(more_nodes)

        for uu in assigned_nodes:
            # make sure these nodes are not reused for other profiles
            node_caps = free_node_caps.pop(uu)
            # save profile for newly assigned nodes, but only if we
            # succeeded in finding enough of them
            if not required_count and not node_caps.get('profile'):
                node = bm_nodes[uu]
                if not dry_run:
                    node_add_capabilities(bm_client, node, profile=profile)
                log.info('Node %s was assigned profile %s', uu, profile)
            else:
                log.debug('Node %s has profile %s', uu, profile)

        if required_count > 0:
            log.error(
                "Error: only %s of %s requested ironic nodes are tagged "
                "to profile %s (for flavor %s)",
                scale - required_count, scale, profile, flavor_name
            )
            log.error(
                "Recommendation: tag more nodes using ironic node-update "
                "<NODE ID> replace properties/capabilities=profile:%s,"
                "boot_option:local", profile)
            predeploy_errors += 1

    # leftover nodes without any profile will never be picked by the
    # scheduler -- warn about them once, regardless of count
    nodes_without_profile = [uu for uu, caps in free_node_caps.items()
                             if not caps.get('profile')]
    if nodes_without_profile:
        predeploy_warnings += 1
        log.warning(
            "There are %d ironic nodes with no profile that will "
            "not be used: %s", len(nodes_without_profile),
            ', '.join(nodes_without_profile)
        )

    return predeploy_errors, predeploy_warnings
|
||||
|
||||
|
||||
def add_deployment_plan_arguments(parser):
    """Add deployment plan arguments (flavors and scales) to a parser"""
    # Each overcloud role gets a --ROLE-scale and --ROLE-flavor option.
    # The option strings are kept as literals (not generated in a loop)
    # so gettext message extraction can see the help strings.
    parser.add_argument('--control-scale', type=int,
                        help=_('New number of control nodes.'))
    parser.add_argument('--compute-scale', type=int,
                        help=_('New number of compute nodes.'))
    parser.add_argument('--ceph-storage-scale', type=int,
                        help=_('New number of ceph storage nodes.'))
    parser.add_argument('--block-storage-scale', type=int,
                        help=_('New number of cinder storage nodes.'))
    parser.add_argument('--swift-storage-scale', type=int,
                        help=_('New number of swift storage nodes.'))
    parser.add_argument('--control-flavor',
                        help=_("Nova flavor to use for control nodes."))
    parser.add_argument('--compute-flavor',
                        help=_("Nova flavor to use for compute nodes."))
    parser.add_argument('--ceph-storage-flavor',
                        help=_("Nova flavor to use for ceph storage "
                               "nodes."))
    parser.add_argument('--block-storage-flavor',
                        help=_("Nova flavor to use for cinder storage "
                               "nodes."))
    parser.add_argument('--swift-storage-flavor',
                        help=_("Nova flavor to use for swift storage "
                               "nodes."))
|
||||
|
||||
|
||||
def get_roles_info(parsed_args):
    """Get flavor name and scale for all deployment roles.

    :returns: dict role name -> (flavor name, scale)
    """
    result = {}
    # Role names use dashes; the corresponding parsed_args attributes
    # use underscores.
    for role in ('control', 'compute', 'ceph-storage',
                 'block-storage', 'swift-storage'):
        attr_prefix = role.replace('-', '_')
        result[role] = (getattr(parsed_args, '%s_flavor' % attr_prefix),
                        getattr(parsed_args, '%s_scale' % attr_prefix))
    return result
|
||||
|
@ -15,7 +15,6 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import argparse
|
||||
import collections
|
||||
import glob
|
||||
import json
|
||||
import logging
|
||||
@ -525,53 +524,17 @@ class DeployOvercloud(command.Command):
|
||||
|
||||
self._check_boot_images()
|
||||
|
||||
self._check_flavors_exist(parsed_args)
|
||||
flavors = self._collect_flavors(parsed_args)
|
||||
|
||||
self._check_ironic_boot_configuration(bm_client)
|
||||
|
||||
flavor_profile_map = self._collect_flavor_profiles([
|
||||
parsed_args.control_flavor,
|
||||
parsed_args.compute_flavor,
|
||||
parsed_args.ceph_storage_flavor,
|
||||
parsed_args.block_storage_flavor,
|
||||
parsed_args.swift_storage_flavor,
|
||||
])
|
||||
node_profile_map = self._collect_node_profiles()
|
||||
|
||||
for target, flavor, scale in [
|
||||
('control', parsed_args.control_flavor,
|
||||
parsed_args.control_scale),
|
||||
('compute', parsed_args.compute_flavor,
|
||||
parsed_args.compute_scale),
|
||||
('ceph-storage', parsed_args.ceph_storage_flavor,
|
||||
parsed_args.ceph_storage_scale),
|
||||
('block-storage', parsed_args.block_storage_flavor,
|
||||
parsed_args.block_storage_scale),
|
||||
('swift-storage', parsed_args.swift_storage_flavor,
|
||||
parsed_args.swift_storage_scale),
|
||||
]:
|
||||
if scale == 0 or flavor is None:
|
||||
self.log.debug("Skipping verification of %s profiles because "
|
||||
"none will be deployed", flavor)
|
||||
continue
|
||||
self._check_profiles(
|
||||
target, flavor, scale,
|
||||
flavor_profile_map,
|
||||
node_profile_map)
|
||||
|
||||
if (node_profile_map.get(None) and
|
||||
any([parsed_args.block_storage_flavor,
|
||||
parsed_args.ceph_storage_flavor,
|
||||
parsed_args.compute_flavor,
|
||||
parsed_args.control_flavor,
|
||||
parsed_args.swift_storage_flavor])):
|
||||
self.predeploy_warnings += 1
|
||||
self.log.warning(
|
||||
"There are %d ironic nodes with no profile that will "
|
||||
"not be used: %s",
|
||||
len(node_profile_map[None]),
|
||||
', '.join(node_profile_map[None])
|
||||
)
|
||||
errors, warnings = utils.assign_and_verify_profiles(
|
||||
bm_client, flavors,
|
||||
assign_profiles=False,
|
||||
dry_run=parsed_args.dry_run
|
||||
)
|
||||
self.predeploy_errors += errors
|
||||
self.predeploy_warnings += warnings
|
||||
|
||||
return self.predeploy_errors, self.predeploy_warnings
|
||||
|
||||
@ -611,82 +574,6 @@ class DeployOvercloud(command.Command):
|
||||
self.__ramdisk_id = ramdisk_id
|
||||
return kernel_id, ramdisk_id
|
||||
|
||||
def _collect_node_profiles(self):
|
||||
"""Gather a map of profile -> [node_uuid] for ironic boot profiles"""
|
||||
bm_client = self.app.client_manager.tripleoclient.baremetal
|
||||
|
||||
# map of profile capability -> [node_uuid, ...]
|
||||
profile_map = collections.defaultdict(list)
|
||||
|
||||
for node in bm_client.node.list(maintenance=False):
|
||||
node = bm_client.node.get(node.uuid)
|
||||
profiles = re.findall(r'profile:(.*?)(?:,|$)',
|
||||
node.properties.get('capabilities', ''))
|
||||
if not profiles:
|
||||
profile_map[None].append(node.uuid)
|
||||
for p in profiles:
|
||||
profile_map[p].append(node.uuid)
|
||||
|
||||
return dict(profile_map)
|
||||
|
||||
def _collect_flavor_profiles(self, flavors):
|
||||
compute_client = self.app.client_manager.compute
|
||||
|
||||
flavor_profiles = {}
|
||||
|
||||
for flavor in compute_client.flavors.list():
|
||||
if flavor.name not in flavors:
|
||||
self.log.debug("Flavor {} isn't used in this deployment, "
|
||||
"skipping it".format(flavor.name))
|
||||
continue
|
||||
|
||||
profile = flavor.get_keys().get('capabilities:profile')
|
||||
if profile == '':
|
||||
flavor_profiles[flavor.name] = None
|
||||
else:
|
||||
flavor_profiles[flavor.name] = profile
|
||||
|
||||
if flavor.get_keys().get('capabilities:boot_option', '') \
|
||||
!= 'local':
|
||||
self.predeploy_warnings += 1
|
||||
self.log.error(
|
||||
'Flavor %s "capabilities:boot_option" is not set to '
|
||||
'"local". Nodes must have ability to PXE boot from '
|
||||
'deploy image.', flavor.name)
|
||||
self.log.error(
|
||||
'Recommended solution: openstack flavor set --property '
|
||||
'"cpu_arch"="x86_64" --property '
|
||||
'"capabilities:boot_option"="local" ' + flavor.name)
|
||||
|
||||
return flavor_profiles
|
||||
|
||||
def _check_profiles(self, target, flavor, scale,
|
||||
flavor_profile_map,
|
||||
node_profile_map):
|
||||
if flavor_profile_map.get(flavor) is None:
|
||||
self.predeploy_errors += 1
|
||||
self.log.error(
|
||||
'Warning: The flavor selected for --%s-flavor "%s" has no '
|
||||
'profile associated', target, flavor)
|
||||
self.log.error(
|
||||
'Recommendation: assign a profile with openstack flavor set '
|
||||
'--property "capabilities:profile"="PROFILE_NAME" %s',
|
||||
flavor)
|
||||
return
|
||||
|
||||
if len(node_profile_map.get(flavor_profile_map[flavor], [])) < scale:
|
||||
self.predeploy_errors += 1
|
||||
self.log.error(
|
||||
"Error: %s of %s requested ironic nodes tagged to profile %s "
|
||||
"(for flavor %s)",
|
||||
len(node_profile_map.get(flavor_profile_map[flavor], [])),
|
||||
scale, flavor_profile_map[flavor], flavor
|
||||
)
|
||||
self.log.error(
|
||||
"Recommendation: tag more nodes using ironic node-update "
|
||||
"<NODE ID> replace properties/capabilities=profile:%s,"
|
||||
"boot_option:local", flavor_profile_map[flavor])
|
||||
|
||||
def _check_boot_images(self):
|
||||
kernel_id, ramdisk_id = self._image_ids()
|
||||
message = ("No image with the name '{}' found - make "
|
||||
@ -698,31 +585,50 @@ class DeployOvercloud(command.Command):
|
||||
self.predeploy_errors += 1
|
||||
self.log.error(message.format('bm-deploy-ramdisk'))
|
||||
|
||||
def _collect_flavors(self, parsed_args):
    """Validate and collect nova flavors in use.

    Ensure that selected flavors (--ROLE-flavor) are valid in nova.
    Issue a warning if local boot is not set for a flavor.  Missing
    flavors and missing boot_option capabilities are tallied in
    ``self.predeploy_errors`` / ``self.predeploy_warnings``.

    :returns: dictionary flavor name -> (flavor object, scale)
    """
    compute_client = self.app.client_manager.compute

    flavors = {f.name: f for f in compute_client.flavors.list()}
    result = {}

    message = "Provided --{}-flavor, '{}', does not exist"

    # get_roles_info yields role name -> (flavor name, requested scale)
    # for every --ROLE-flavor/--ROLE-scale pair.
    for target, (flavor_name, scale) in (
        utils.get_roles_info(parsed_args).items()
    ):
        # Roles with no flavor or a zero scale are simply not deployed.
        if flavor_name is None or not scale:
            self.log.debug("--{}-flavor not used".format(target))
            continue

        try:
            flavor = flavors[flavor_name]
        except KeyError:
            self.predeploy_errors += 1
            self.log.error(message.format(target, flavor_name))
            continue

        # Deploy images are PXE-booted, so boot_option must be 'local';
        # anything else is a likely misconfiguration but not fatal.
        if flavor.get_keys().get('capabilities:boot_option', '') \
                != 'local':
            self.predeploy_warnings += 1
            self.log.warning(
                'Flavor %s "capabilities:boot_option" is not set to '
                '"local". Nodes must have ability to PXE boot from '
                'deploy image.', flavor_name)
            self.log.warning(
                'Recommended solution: openstack flavor set --property '
                '"cpu_arch"="x86_64" --property '
                '"capabilities:boot_option"="local" ' + flavor_name)

        result[flavor_name] = (flavor, scale)

    return result
|
||||
|
||||
def _check_ironic_boot_configuration(self, bm_client):
|
||||
for node in bm_client.node.list(detail=True, maintenance=False):
|
||||
@ -778,29 +684,7 @@ class DeployOvercloud(command.Command):
|
||||
parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
|
||||
type=int, default=240,
|
||||
help=_('Deployment timeout in minutes.'))
|
||||
parser.add_argument('--control-scale', type=int,
|
||||
help=_('New number of control nodes.'))
|
||||
parser.add_argument('--compute-scale', type=int,
|
||||
help=_('New number of compute nodes.'))
|
||||
parser.add_argument('--ceph-storage-scale', type=int,
|
||||
help=_('New number of ceph storage nodes.'))
|
||||
parser.add_argument('--block-storage-scale', type=int,
|
||||
help=_('New number of cinder storage nodes.'))
|
||||
parser.add_argument('--swift-storage-scale', type=int,
|
||||
help=_('New number of swift storage nodes.'))
|
||||
parser.add_argument('--control-flavor',
|
||||
help=_("Nova flavor to use for control nodes."))
|
||||
parser.add_argument('--compute-flavor',
|
||||
help=_("Nova flavor to use for compute nodes."))
|
||||
parser.add_argument('--ceph-storage-flavor',
|
||||
help=_("Nova flavor to use for ceph storage "
|
||||
"nodes."))
|
||||
parser.add_argument('--block-storage-flavor',
|
||||
help=_("Nova flavor to use for cinder storage "
|
||||
"nodes."))
|
||||
parser.add_argument('--swift-storage-flavor',
|
||||
help=_("Nova flavor to use for swift storage "
|
||||
"nodes."))
|
||||
utils.add_deployment_plan_arguments(parser)
|
||||
parser.add_argument('--neutron-flat-networks',
|
||||
help=_('Comma separated list of physical_network '
|
||||
'names with which flat networks can be '
|
||||
|
119
tripleoclient/v1/overcloud_profiles.py
Normal file
119
tripleoclient/v1/overcloud_profiles.py
Normal file
@ -0,0 +1,119 @@
|
||||
# Copyright 2015 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
|
||||
from cliff import command
|
||||
from cliff import lister
|
||||
from openstackclient.i18n import _
|
||||
|
||||
from tripleoclient import exceptions
|
||||
from tripleoclient import utils
|
||||
|
||||
|
||||
class MatchProfiles(command.Command):
    """Assign and validate profiles on nodes"""

    log = logging.getLogger(__name__ + ".MatchProfiles")

    def get_parser(self, prog_name):
        """Build the argument parser for this command."""
        parser = super(MatchProfiles, self).get_parser(prog_name)
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help=_('Only run validations, but do not apply any changes.')
        )
        # Adds the shared --ROLE-flavor / --ROLE-scale options.
        utils.add_deployment_plan_arguments(parser)
        return parser

    def take_action(self, parsed_args):
        """Match nodes to the flavors' profiles; fail on any error."""
        self.log.debug("take_action(%s)" % parsed_args)
        bm_client = self.app.client_manager.tripleoclient.baremetal

        flavors = self._collect_flavors(parsed_args)

        errors, warnings = utils.assign_and_verify_profiles(
            bm_client, flavors,
            assign_profiles=True,
            dry_run=parsed_args.dry_run
        )
        if errors:
            raise exceptions.ProfileMatchingError(
                _('Failed to validate and assign profiles.'))

    def _collect_flavors(self, parsed_args):
        """Collect nova flavors in use.

        :returns: dictionary flavor name -> (flavor object, scale)
        """
        compute_client = self.app.client_manager.compute

        known_flavors = {f.name: f for f in compute_client.flavors.list()}
        collected = {}

        message = "Provided --{}-flavor, '{}', does not exist"

        for role, (flavor_name, scale) in (
                utils.get_roles_info(parsed_args).items()):
            # Skip roles the user did not ask to deploy.
            if flavor_name is None or not scale:
                self.log.debug("--{}-flavor not used".format(role))
                continue

            flavor = known_flavors.get(flavor_name)
            if flavor is None:
                raise exceptions.ProfileMatchingError(
                    message.format(role, flavor_name))

            collected[flavor_name] = (flavor, scale)

        return collected
|
||||
|
||||
|
||||
# Suffix of capabilities that mark a *possible* profile for a node,
# e.g. 'compute_profile' -> possible profile 'compute'.
POSTFIX = '_profile'
|
||||
|
||||
|
||||
class ListProfiles(lister.Lister):
    """List overcloud node profiles"""

    log = logging.getLogger(__name__ + ".ListProfiles")

    def take_action(self, parsed_args):
        """Return a table of nodes with their current/possible profiles."""
        self.log.debug("take_action(%s)" % parsed_args)
        client = self.app.client_manager.tripleoclient.baremetal

        rows = []

        for node in client.node.list(detail=True, maintenance=False):
            # Only active and available nodes count for deployments.
            if node.provision_state not in ('active', 'available'):
                continue

            caps = utils.node_get_capabilities(node)
            # Capabilities such as 'compute_profile=1' mark possible
            # profiles; sorted for convenient display and testing.
            possible = sorted(
                name[:-len(POSTFIX)]
                for name, value in caps.items()
                if name.endswith(POSTFIX) and
                value.lower() in ('1', 'true')
            )
            rows.append((node.uuid, node.name or '', node.provision_state,
                         caps.get('profile'), ', '.join(possible)))

        return (
            ("Node UUID", "Node Name", "Provision State", "Current Profile",
             "Possible Profiles"),
            rows
        )
|
Loading…
Reference in New Issue
Block a user