Merge "Allow filtering by arbitrary predicate and conductor_group"

Authored by Zuul on 2018-09-06 10:41:36 +00:00, committed by Gerrit Code Review
commit 3ce961db8c
12 changed files with 235 additions and 112 deletions

View File

@ -56,7 +56,9 @@ def _do_deploy(api, args, formatter):
if args.user_name:
config.add_user(args.user_name, sudo=args.passwordless_sudo)
node = api.reserve_node(args.resource_class, capabilities=capabilities,
node = api.reserve_node(resource_class=args.resource_class,
conductor_group=args.conductor_group,
capabilities=capabilities,
candidates=args.candidate)
instance = api.provision_node(node,
image=args.image,
@ -138,6 +140,8 @@ def _parse_args(args, config):
'Node\'s name or UUID')
deploy.add_argument('--resource-class',
help='node resource class to deploy')
deploy.add_argument('--conductor-group',
help='conductor group to pick the node from')
deploy.add_argument('--candidate', action='append',
help='A candidate node to use for scheduling (can be '
'specified several times)')
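
For illustration, a minimal sketch of how the new flag reaches the parser, mirroring the unit tests further below (the argument values 'compute', 'loc1' and 'myimg' are placeholders)::

    from metalsmith import _cmd

    # Hypothetical invocation of the deploy command with the new flag;
    # the parsed --conductor-group value is passed to api.reserve_node().
    _cmd.main(['deploy', '--resource-class', 'compute',
               '--conductor-group', 'loc1', '--image', 'myimg'])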

View File

@ -24,9 +24,6 @@ from metalsmith import _utils
LOG = logging.getLogger(__name__)
NODE_FIELDS = ['name', 'uuid', 'instance_info', 'instance_uuid', 'maintenance',
'maintenance_reason', 'properties', 'provision_state', 'extra',
'last_error']
HOSTNAME_FIELD = 'metalsmith_hostname'
@ -57,7 +54,9 @@ class API(object):
"""Various OpenStack API's."""
IRONIC_VERSION = '1'
IRONIC_MICRO_VERSION = '1.28'
# TODO(dtantsur): use openstacksdk and stop hardcoding this here.
# 1.46 (Rocky) adds conductor_group.
IRONIC_MICRO_VERSION = '1.46'
_node_list = None
@ -139,7 +138,7 @@ class API(object):
if by_hostname is not None:
return by_hostname
return self.ironic.node.get(node, fields=NODE_FIELDS)
return self.ironic.node.get(node)
elif hasattr(node, 'node'):
# Instance object
node = node.node
@ -147,7 +146,7 @@ class API(object):
node = node
if refresh:
return self.ironic.node.get(node.uuid, fields=NODE_FIELDS)
return self.ironic.node.get(node.uuid)
else:
return node
@ -161,14 +160,14 @@ class API(object):
def list_node_ports(self, node):
return self.ironic.node.list_ports(_node_id(node), limit=0)
def list_nodes(self, resource_class=None, maintenance=False,
associated=False, provision_state='available',
fields=None):
return self.ironic.node.list(limit=0, resource_class=resource_class,
maintenance=maintenance,
def list_nodes(self, maintenance=False, associated=False,
provision_state='available', **filters):
if 'fields' not in filters:
filters['detail'] = True
return self.ironic.node.list(limit=0, maintenance=maintenance,
associated=associated,
provision_state=provision_state,
fields=fields or NODE_FIELDS)
**filters)
def node_action(self, node, action, **kwargs):
self.ironic.node.set_provision_state(_node_id(node), action, **kwargs)
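
A hedged sketch of calling the reworked list_nodes, which now forwards extra keyword filters; the session and cloud_region objects are assumed to exist already, and the filter names match the provisioner call shown below::

    from metalsmith import _os_api

    # Sketch only: 'session' and 'cloud_region' are assumptions.
    api = _os_api.API(session=session, cloud_region=cloud_region)

    # resource_class and conductor_group are forwarded to ironic's
    # node listing; detail=True is added because no explicit 'fields'
    # filter is supplied.
    nodes = api.list_nodes(resource_class='compute', conductor_group='loc1')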

View File

@ -50,8 +50,8 @@ class Provisioner(object):
self._api = _os_api.API(session=session, cloud_region=cloud_region)
self._dry_run = dry_run
def reserve_node(self, resource_class=None, capabilities=None,
candidates=None):
def reserve_node(self, resource_class=None, conductor_group=None,
capabilities=None, candidates=None, predicate=None):
"""Find and reserve a suitable node.
Example::
@ -61,11 +61,17 @@ class Provisioner(object):
:param resource_class: Requested resource class. If ``None``, a node
with any resource class can be chosen.
:param conductor_group: Conductor group to pick the nodes from.
Value ``None`` means any group; use an empty string "" for nodes
from the default group.
:param capabilities: Requested capabilities as a dict.
:param candidates: List of nodes (UUIDs, names or `Node` objects)
to pick from. The filters (for resource class and capabilities)
are still applied to the provided list. The order in which
the nodes are considered is retained.
:param predicate: Custom predicate to run on nodes. A callable that
accepts a node and returns ``True`` if it should be included,
``False`` otherwise. Any exceptions are propagated to the caller.
:return: reserved `Node` object.
:raises: :py:class:`metalsmith.exceptions.ReservationFailed`
"""
@ -76,22 +82,28 @@ class Provisioner(object):
if resource_class:
nodes = [node for node in nodes
if node.resource_class == resource_class]
if conductor_group is not None:
nodes = [node for node in nodes
if node.conductor_group == conductor_group]
else:
nodes = self._api.list_nodes(resource_class=resource_class)
nodes = self._api.list_nodes(resource_class=resource_class,
conductor_group=conductor_group)
# Ensure parallel executions don't try nodes in the same sequence
random.shuffle(nodes)
if not nodes:
raise exceptions.ResourceClassNotFound(resource_class,
capabilities)
raise exceptions.NodesNotFound(resource_class, conductor_group)
LOG.debug('Ironic nodes: %s', nodes)
filters = [_scheduler.CapabilitiesFilter(resource_class, capabilities),
_scheduler.ValidationFilter(self._api,
resource_class, capabilities)]
reserver = _scheduler.IronicReserver(self._api, resource_class,
capabilities)
filters = [_scheduler.CapabilitiesFilter(capabilities),
_scheduler.ValidationFilter(self._api)]
if predicate is not None:
# NOTE(dtantsur): run the provided predicate before the validation,
# since validation requires network interactions.
filters.insert(-1, predicate)
reserver = _scheduler.IronicReserver(self._api)
node = _scheduler.schedule_node(nodes, filters, reserver,
dry_run=self._dry_run)
if capabilities:

View File

@ -117,8 +117,7 @@ def schedule_node(nodes, filters, reserver, dry_run=False):
class CapabilitiesFilter(Filter):
"""Filter that checks capabilities."""
def __init__(self, resource_class, capabilities):
self._resource_class = resource_class
def __init__(self, capabilities):
self._capabilities = capabilities
self._counter = collections.Counter()
@ -159,20 +158,16 @@ class CapabilitiesFilter(Filter):
message = ("No available nodes found with capabilities %(req)s, "
"existing capabilities: %(exist)s" %
{'req': requested, 'exist': existing or 'none'})
raise exceptions.CapabilitiesNotFound(message,
self._resource_class,
self._capabilities)
raise exceptions.CapabilitiesNotFound(message, self._capabilities)
class ValidationFilter(Filter):
"""Filter that runs validation on nodes."""
def __init__(self, api, resource_class, capabilities):
def __init__(self, api):
self._api = api
# These are only used for better exceptions
self._resource_class = resource_class
self._capabilities = capabilities
self._failed_validation = []
self._messages = []
self._failed_nodes = []
def __call__(self, node):
try:
@ -181,45 +176,44 @@ class ValidationFilter(Filter):
message = ('Node %(node)s failed validation: %(err)s' %
{'node': _utils.log_node(node), 'err': exc})
LOG.warning(message)
self._failed_validation.append(message)
self._messages.append(message)
self._failed_nodes.append(node)
return False
return True
def fail(self):
errors = ", ".join(self._failed_validation)
errors = ", ".join(self._messages)
message = "All available nodes have failed validation: %s" % errors
raise exceptions.ValidationFailed(message,
self._resource_class,
self._capabilities)
raise exceptions.ValidationFailed(message, self._failed_nodes)
class IronicReserver(Reserver):
def __init__(self, api, resource_class, capabilities):
def __init__(self, api):
self._api = api
# These are only used for better exceptions
self._resource_class = resource_class
self._capabilities = capabilities
self._failed_nodes = []
def __call__(self, node):
result = self._api.reserve_node(node, instance_uuid=node.uuid)
try:
result = self._api.reserve_node(node, instance_uuid=node.uuid)
# Try validation again to be sure nothing has changed
validator = ValidationFilter(self._api, self._resource_class,
self._capabilities)
if not validator(result):
LOG.warning('Validation of node %s failed after reservation',
_utils.log_node(node))
try:
self._api.release_node(node)
except Exception:
LOG.exception('Failed to release the reserved node %s',
_utils.log_node(node))
validator.fail()
# Try validation again to be sure nothing has changed
validator = ValidationFilter(self._api)
if not validator(result):
LOG.warning('Validation of node %s failed after reservation',
_utils.log_node(node))
try:
self._api.release_node(node)
except Exception:
LOG.exception('Failed to release the reserved node %s',
_utils.log_node(node))
validator.fail()
return result
return result
except Exception:
self._failed_nodes.append(node)
raise
def fail(self):
raise exceptions.AllNodesReserved(self._resource_class,
self._capabilities)
raise exceptions.AllNodesReserved(self._failed_nodes)
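
For orientation, this is roughly how the provisioner above wires the simplified scheduler classes together; a condensed sketch of the call sites shown earlier, not new behaviour (``api``, ``nodes``, ``capabilities`` and ``predicate`` are assumed to be in scope)::

    filters = [_scheduler.CapabilitiesFilter(capabilities),
               _scheduler.ValidationFilter(api)]
    if predicate is not None:
        # run the cheap, user-supplied predicate before the
        # network-bound validation step
        filters.insert(-1, predicate)
    reserver = _scheduler.IronicReserver(api)
    node = _scheduler.schedule_node(nodes, filters, reserver, dry_run=False)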

View File

@ -19,42 +19,64 @@ class Error(Exception):
class ReservationFailed(Error):
"""Failed to reserve a suitable node."""
"""Failed to reserve a suitable node.
def __init__(self, message, requested_resource_class,
requested_capabilities):
super(ReservationFailed, self).__init__(message)
self.requested_resource_class = requested_resource_class
self.requested_capabilities = requested_capabilities
This is the base class for all reservation failures.
"""
class ResourceClassNotFound(ReservationFailed):
"""No nodes match the given resource class."""
class NodesNotFound(ReservationFailed):
"""Initial nodes lookup returned an empty list.
def __init__(self, requested_resource_class, requested_capabilities):
message = ("No available nodes found with resource class %s" %
requested_resource_class)
super(ResourceClassNotFound, self).__init__(message,
requested_resource_class,
requested_capabilities)
:ivar requested_resource_class: Requested resource class.
:ivar requested_conductor_group: Requested conductor group to pick nodes
from.
"""
def __init__(self, resource_class, conductor_group):
message = "No available nodes%(rc)s found%(cg)s" % {
'rc': ' with resource class %s' % resource_class
if resource_class else '',
'cg': ' in conductor group %s' % (conductor_group or '<default>')
if conductor_group is not None else ''
}
self.requested_resource_class = resource_class
self.requested_conductor_group = conductor_group
super(NodesNotFound, self).__init__(message)
class CapabilitiesNotFound(ReservationFailed):
"""Requested capabilities do not match any nodes."""
"""Requested capabilities do not match any nodes.
:ivar requested_capabilities: Requested node's capabilities.
"""
def __init__(self, message, capabilities):
self.requested_capabilities = capabilities
super(CapabilitiesNotFound, self).__init__(message)
class ValidationFailed(ReservationFailed):
"""Validation failed for all requested nodes."""
"""Validation failed for all requested nodes.
:ivar nodes: List of nodes that were checked.
"""
def __init__(self, message, nodes):
self.nodes = nodes
super(ValidationFailed, self).__init__(message)
class AllNodesReserved(ReservationFailed):
"""All nodes are already reserved."""
"""All nodes are already reserved.
def __init__(self, requested_resource_class, requested_capabilities):
:ivar nodes: List of nodes that were checked.
"""
def __init__(self, nodes):
self.nodes = nodes
message = 'All the candidate nodes are already reserved'
super(AllNodesReserved, self).__init__(message,
requested_resource_class,
requested_capabilities)
super(AllNodesReserved, self).__init__(message)
class InvalidImage(Error):
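
A hedged sketch of how a caller might handle the reworked exception hierarchy; ``provisioner`` and the argument values are assumptions, while the attribute names come from the docstrings above::

    from metalsmith import exceptions

    try:
        node = provisioner.reserve_node(resource_class='compute',
                                        conductor_group='rack1')
    except exceptions.NodesNotFound as exc:
        print('no candidates:', exc.requested_resource_class,
              exc.requested_conductor_group)
    except exceptions.ValidationFailed as exc:
        print('all candidates failed validation:', exc.nodes)
    except exceptions.ReservationFailed as exc:
        # catches the remaining subclasses, e.g. AllNodesReserved
        print('reservation failed:', exc)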

View File

@ -55,6 +55,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -101,6 +102,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -174,6 +176,7 @@ class TestDeploy(testtools.TestCase):
dry_run=True)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -197,6 +200,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -228,6 +232,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -261,6 +266,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -292,6 +298,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -323,6 +330,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -379,6 +387,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={'foo': 'bar', 'answer': '42'},
candidates=None
)
@ -405,6 +414,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -430,6 +440,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -458,6 +469,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -483,6 +495,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -504,6 +517,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -527,6 +541,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -550,6 +565,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -572,6 +588,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class=None,
conductor_group=None,
capabilities={},
candidates=['node1', 'node2']
)
@ -585,6 +602,29 @@ class TestDeploy(testtools.TestCase):
netboot=False,
wait=1800)
def test_args_conductor_group(self, mock_os_conf, mock_pr):
args = ['deploy', '--conductor-group', 'loc1', '--image', 'myimg',
'--resource-class', 'compute']
_cmd.main(args)
mock_pr.assert_called_once_with(
cloud_region=mock_os_conf.return_value.get_one.return_value,
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group='loc1',
capabilities={},
candidates=None
)
mock_pr.return_value.provision_node.assert_called_once_with(
mock_pr.return_value.reserve_node.return_value,
image='myimg',
nics=None,
root_disk_size=None,
config=mock.ANY,
hostname=None,
netboot=False,
wait=1800)
def test_args_custom_wait(self, mock_os_conf, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--wait', '3600', '--resource-class', 'compute']
@ -594,6 +634,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)
@ -616,6 +657,7 @@ class TestDeploy(testtools.TestCase):
dry_run=False)
mock_pr.return_value.reserve_node.assert_called_once_with(
resource_class='compute',
conductor_group=None,
capabilities={},
candidates=None
)

View File

@ -54,8 +54,7 @@ class TestNodes(testtools.TestCase):
def test_get_node_by_uuid(self):
res = self.api.get_node('uuid1')
self.cli.node.get.assert_called_once_with('uuid1',
fields=_os_api.NODE_FIELDS)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_hostname(self):
@ -66,8 +65,7 @@ class TestNodes(testtools.TestCase):
]
res = self.api.get_node('host1', accept_hostname=True)
# Loading details
self.cli.node.get.assert_called_once_with('uuid1',
fields=_os_api.NODE_FIELDS)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_hostname_not_found(self):
@ -78,8 +76,7 @@ class TestNodes(testtools.TestCase):
]
res = self.api.get_node('host1', accept_hostname=True)
# Loading details
self.cli.node.get.assert_called_once_with('host1',
fields=_os_api.NODE_FIELDS)
self.cli.node.get.assert_called_once_with('host1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_node(self):
@ -90,8 +87,7 @@ class TestNodes(testtools.TestCase):
def test_get_node_by_node_with_refresh(self):
res = self.api.get_node(mock.Mock(spec=['uuid'], uuid='uuid1'),
refresh=True)
self.cli.node.get.assert_called_once_with('uuid1',
fields=_os_api.NODE_FIELDS)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_get_node_by_instance(self):
@ -104,8 +100,7 @@ class TestNodes(testtools.TestCase):
inst = _instance.Instance(mock.Mock(),
mock.Mock(spec=['uuid'], uuid='uuid1'))
res = self.api.get_node(inst, refresh=True)
self.cli.node.get.assert_called_once_with('uuid1',
fields=_os_api.NODE_FIELDS)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_find_node_by_hostname(self):
@ -116,8 +111,7 @@ class TestNodes(testtools.TestCase):
]
res = self.api.find_node_by_hostname('host1')
# Loading details
self.cli.node.get.assert_called_once_with('uuid1',
fields=_os_api.NODE_FIELDS)
self.cli.node.get.assert_called_once_with('uuid1')
self.assertIs(res, self.cli.node.get.return_value)
def test_find_node_by_hostname_cached(self):

View File

@ -25,13 +25,18 @@ from metalsmith import exceptions
from metalsmith import sources
NODE_FIELDS = ['name', 'uuid', 'instance_info', 'instance_uuid', 'maintenance',
'maintenance_reason', 'properties', 'provision_state', 'extra',
'last_error']
class Base(testtools.TestCase):
def setUp(self):
super(Base, self).setUp()
self.pr = _provisioner.Provisioner(mock.Mock())
self._reset_api_mock()
self.node = mock.Mock(spec=_os_api.NODE_FIELDS + ['to_dict'],
self.node = mock.Mock(spec=NODE_FIELDS + ['to_dict'],
uuid='000', instance_uuid=None,
properties={'local_gb': 100},
instance_info={},
@ -59,8 +64,8 @@ class TestReserveNode(Base):
def test_no_nodes(self):
self.api.list_nodes.return_value = []
self.assertRaises(exceptions.ResourceClassNotFound,
self.pr.reserve_node, 'control')
self.assertRaises(exceptions.NodesNotFound,
self.pr.reserve_node, resource_class='control')
self.assertFalse(self.api.reserve_node.called)
def test_simple_ok(self):
@ -99,12 +104,30 @@ class TestReserveNode(Base):
self.api.list_nodes.return_value = nodes
self.api.reserve_node.side_effect = lambda n, instance_uuid: n
node = self.pr.reserve_node('control', {'answer': '42'})
node = self.pr.reserve_node('control', capabilities={'answer': '42'})
self.assertIs(node, expected)
self.api.update_node.assert_called_once_with(
node, {'/instance_info/capabilities': {'answer': '42'}})
def test_custom_predicate(self):
nodes = [
mock.Mock(spec=['uuid', 'name', 'properties'],
properties={'local_gb': 100}),
mock.Mock(spec=['uuid', 'name', 'properties'],
properties={'local_gb': 150}),
mock.Mock(spec=['uuid', 'name', 'properties'],
properties={'local_gb': 200}),
]
self.api.list_nodes.return_value = nodes[:]
self.api.reserve_node.side_effect = lambda n, instance_uuid: n
node = self.pr.reserve_node(
predicate=lambda node: 100 < node.properties['local_gb'] < 200)
self.assertEqual(node, nodes[1])
self.assertFalse(self.api.update_node.called)
def test_provided_node(self):
nodes = [
mock.Mock(spec=['uuid', 'name', 'properties'],
@ -153,6 +176,28 @@ class TestReserveNode(Base):
self.api.update_node.assert_called_once_with(
node, {'/instance_info/capabilities': {'cat': 'meow'}})
def test_nodes_filtered_by_conductor_group(self):
nodes = [
mock.Mock(spec=['uuid', 'name', 'properties', 'conductor_group'],
properties={'local_gb': 100}, conductor_group='loc1'),
mock.Mock(spec=['uuid', 'name', 'properties', 'conductor_group'],
properties={'local_gb': 100, 'capabilities': 'cat:meow'},
conductor_group=''),
mock.Mock(spec=['uuid', 'name', 'properties', 'conductor_group'],
properties={'local_gb': 100, 'capabilities': 'cat:meow'},
conductor_group='loc1'),
]
self.api.reserve_node.side_effect = lambda n, instance_uuid: n
node = self.pr.reserve_node(conductor_group='loc1',
candidates=nodes,
capabilities={'cat': 'meow'})
self.assertEqual(node, nodes[2])
self.assertFalse(self.api.list_nodes.called)
self.api.update_node.assert_called_once_with(
node, {'/instance_info/capabilities': {'cat': 'meow'}})
CLEAN_UP = {
'/extra/metalsmith_created_ports': _os_api.REMOVE,
@ -848,7 +893,7 @@ class TestWaitForState(Base):
def test_success_one_node(self, mock_sleep):
nodes = [
mock.Mock(spec=_os_api.NODE_FIELDS, provision_state=state)
mock.Mock(spec=NODE_FIELDS, provision_state=state)
for state in ('deploying', 'deploy wait', 'deploying', 'active')
]
self.api.get_node.side_effect = nodes
@ -862,7 +907,7 @@ class TestWaitForState(Base):
def test_success_several_nodes(self, mock_sleep):
nodes = [
mock.Mock(spec=_os_api.NODE_FIELDS, provision_state=state)
mock.Mock(spec=NODE_FIELDS, provision_state=state)
for state in ('deploying', 'deploy wait', # iteration 1
'deploying', 'active', # iteration 2
'active') # iteration 3
@ -879,7 +924,7 @@ class TestWaitForState(Base):
def test_one_node_failed(self, mock_sleep):
nodes = [
mock.Mock(spec=_os_api.NODE_FIELDS, provision_state=state)
mock.Mock(spec=NODE_FIELDS, provision_state=state)
for state in ('deploying', 'deploy wait', # iteration 1
'deploying', 'deploy failed', # iteration 2
'active') # iteration 3
@ -898,7 +943,7 @@ class TestWaitForState(Base):
def test_timeout(self, mock_sleep):
def _fake_get(*args, **kwargs):
while True:
yield mock.Mock(spec=_os_api.NODE_FIELDS,
yield mock.Mock(spec=NODE_FIELDS,
provision_state='deploying')
self.api.get_node.side_effect = _fake_get()
@ -913,7 +958,7 @@ class TestWaitForState(Base):
def test_custom_delay(self, mock_sleep):
nodes = [
mock.Mock(spec=_os_api.NODE_FIELDS, provision_state=state)
mock.Mock(spec=NODE_FIELDS, provision_state=state)
for state in ('deploying', 'deploy wait', 'deploying', 'active')
]
self.api.get_node.side_effect = nodes
@ -930,7 +975,7 @@ class TestListInstances(Base):
def setUp(self):
super(TestListInstances, self).setUp()
self.nodes = [
mock.Mock(spec=_os_api.NODE_FIELDS, provision_state=state,
mock.Mock(spec=NODE_FIELDS, provision_state=state,
instance_info={'metalsmith_hostname': '1234'})
for state in ('active', 'active', 'deploying', 'wait call-back',
'deploy failed', 'available')

View File

@ -113,35 +113,35 @@ class TestScheduleNode(testtools.TestCase):
class TestCapabilitiesFilter(testtools.TestCase):
def test_fail_no_capabilities(self):
fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute'})
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
'No available nodes found with capabilities '
'profile=compute, existing capabilities: none',
fltr.fail)
def test_nothing_requested_nothing_found(self):
fltr = _scheduler.CapabilitiesFilter('rsc', {})
fltr = _scheduler.CapabilitiesFilter({})
node = mock.Mock(properties={}, spec=['properties', 'name', 'uuid'])
self.assertTrue(fltr(node))
def test_matching_node(self):
fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute',
'foo': 'bar'})
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute',
'foo': 'bar'})
node = mock.Mock(
properties={'capabilities': 'foo:bar,profile:compute,answer:42'},
spec=['properties', 'name', 'uuid'])
self.assertTrue(fltr(node))
def test_not_matching_node(self):
fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute',
'foo': 'bar'})
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute',
'foo': 'bar'})
node = mock.Mock(
properties={'capabilities': 'foo:bar,answer:42'},
spec=['properties', 'name', 'uuid'])
self.assertFalse(fltr(node))
def test_fail_message(self):
fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute'})
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
node = mock.Mock(
properties={'capabilities': 'profile:control'},
spec=['properties', 'name', 'uuid'])
@ -153,7 +153,7 @@ class TestCapabilitiesFilter(testtools.TestCase):
fltr.fail)
def test_malformed_capabilities(self):
fltr = _scheduler.CapabilitiesFilter('rsc', {'profile': 'compute'})
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
for cap in ['foo,profile:control', 42, 'a:b:c']:
node = mock.Mock(properties={'capabilities': cap},
spec=['properties', 'name', 'uuid'])
@ -169,8 +169,7 @@ class TestValidationFilter(testtools.TestCase):
def setUp(self):
super(TestValidationFilter, self).setUp()
self.api = mock.Mock(spec=['validate_node'])
self.fltr = _scheduler.ValidationFilter(self.api, 'rsc',
{'profile': 'compute'})
self.fltr = _scheduler.ValidationFilter(self.api)
def test_pass(self):
node = mock.Mock(spec=['uuid', 'name'])
@ -195,7 +194,7 @@ class TestIronicReserver(testtools.TestCase):
self.node = mock.Mock(spec=['uuid', 'name'])
self.api = mock.Mock(spec=['reserve_node', 'release_node'])
self.api.reserve_node.side_effect = lambda node, instance_uuid: node
self.reserver = _scheduler.IronicReserver(self.api, 'rsc', {})
self.reserver = _scheduler.IronicReserver(self.api)
def test_fail(self, mock_validation):
self.assertRaisesRegex(exceptions.AllNodesReserved,

View File

@ -17,6 +17,8 @@ The following optional variables provide the defaults for Instance_ attributes:
the default for ``candidates``.
``metalsmith_capabilities``
the default for ``capabilities``.
``metalsmith_conductor_group``
the default for ``conductor_group``.
``metalsmith_extra_args``
the default for ``extra_args``.
``metalsmith_image``
@ -43,6 +45,11 @@ Each instances has the following attributes:
list of nodes (UUIDs or names) to be considered for deployment.
``capabilities`` (defaults to ``metalsmith_capabilities``)
node capabilities to request when scheduling.
``conductor_group`` (defaults to ``metalsmith_conductor_group``)
conductor group to pick nodes from.
.. note:: Currently it's not possible to specify the default group.
``extra_args`` (defaults to ``metalsmith_extra_args``)
additional arguments to pass to the ``metalsmith`` CLI on all calls.
``image`` (defaults to ``metalsmith_image``)

View File

@ -1,6 +1,7 @@
# Optional parameters
metalsmith_candidates: []
metalsmith_capabilities: {}
metalsmith_conductor_group:
metalsmith_extra_args:
metalsmith_netboot: false
metalsmith_nics: []

View File

@ -28,6 +28,9 @@
{% if resource_class %}
--resource-class {{ resource_class }}
{% endif %}
{% if conductor_group %}
--conductor-group {{ conductor_group }}
{% endif %}
{% for node in candidates %}
--candidate {{ node }}
{% endfor %}
@ -35,6 +38,7 @@
vars:
candidates: "{{ instance.candidates | default(metalsmith_candidates) }}"
capabilities: "{{ instance.capabilities | default(metalsmith_capabilities) }}"
conductor_group: "{{ instance.conductor_group | default(metalsmith_conductor_group) }}"
extra_args: "{{ instance.extra_args | default(metalsmith_extra_args) }}"
image: "{{ instance.image | default(metalsmith_image) }}"
netboot: "{{ instance.netboot | default(metalsmith_netboot) }}"