Support updating IPMI credentials from within ramdisk

If ipmi_setup_credentials is set to true in Node.extra, return the
desired credentials to the ramdisk and wait for the power management
interface to pass validation.
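
For illustration (not part of the original flow description): judging
by the diff below, the ramdisk receives roughly this dict from
/v1/continue when credential setup is requested; the values shown are
placeholders:

    {'ipmi_setup_credentials': True,
     'ipmi_username': 'admin',    # from node.driver_info, placeholder
     'ipmi_password': 'secret'}   # from node.driver_info, placeholder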

Unfortunately, due to newly revealed problems, this reverts a large
part of blueprint returning-to-ramdisk.

Change-Id: I63976d01f265d18c385f6a30f1f2884c58ca5a43
Implements: blueprint setup-ipmi-credentials
Author: Dmitry Tantsur
Date: 2014-12-10 20:33:09 +01:00
Parent: 8ffae37346
Commit: 43b0769703
5 changed files with 65 additions and 65 deletions

@@ -201,14 +201,12 @@ Change Log
 v1.0.0
 ~~~~~~

+* Support updating IPMI credentials from within ramdisk.
 * ``/v1/continue`` is now sync:

   * Errors are properly returned to the caller
-  * This call now returns value in form of ``{'node': <node dict>}`` on
-    success
+  * This call now returns value as a JSON dict

-* Option ``power_off_after_discovery`` controls whether to force power off
-  after the successful discovery, and is ``False`` by default.
 * Discovery now times out by default.
 * Add support for plugins that hook into data processing pipeline, see
   `plugin-architecture blueprint`_ for details.
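
As a hypothetical illustration of the new synchronous semantics (the
host, port and payload fields below are assumptions for this sketch,
not taken from the change itself):

    import requests

    # POST the discovery data; the call now blocks until processing is
    # done and errors surface in the HTTP status code.
    resp = requests.post('http://127.0.0.1:5050/v1/continue',
                         json={'ipmi_address': '192.0.2.1',
                               'interfaces': {'em1': {'mac': 'aa:bb:cc:dd:ee:01',
                                                      'ip': '192.0.2.2'}}})
    resp.raise_for_status()  # errors are properly returned to the caller
    result = resp.json()     # a plain JSON dict: {} or IPMI credentials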

@@ -28,8 +28,6 @@
 ;timeout = 3600
 ; Amount of time in seconds, after which repeat clean up of timed out nodes.
 ;firewall_update_period = 60
-; Whether to power off the ramdisk immediately after the successful discovery.
-;power_off_after_discovery = false
 ; IP to listen on.
 ;listen_address = 0.0.0.0

@@ -28,7 +28,6 @@ DEFAULTS = {
     'processing_hooks': 'scheduler,validate_interfaces',
     'timeout': '3600',
     'clean_up_period': '60',
-    'power_off_after_discovery': 'false',
 }
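
These defaults back the conf.getint()/conf.getboolean() lookups used in
process.py. A minimal sketch of that pattern, assuming a plain Python 2
ConfigParser (the real conf module may differ):

    import ConfigParser

    DEFAULTS = {'timeout': '3600', 'clean_up_period': '60'}

    CONF = ConfigParser.ConfigParser(defaults=DEFAULTS)
    CONF.add_section('discoverd')
    # Falls back to DEFAULTS when the option is absent from the file:
    print CONF.getint('discoverd', 'timeout')  # -> 3600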

@@ -28,7 +28,7 @@ from ironic_discoverd import utils
 LOG = logging.getLogger("ironic_discoverd.process")

-_POWER_OFF_CHECK_PERIOD = 5
+_POWER_CHECK_PERIOD = 5


 def process(node_info):
@@ -56,8 +56,7 @@ def process(node_info):
                                     cached_node.uuid,
                                     code=404)

-    updated = _process_node(ironic, node, node_info, cached_node)
-    return {'node': updated.to_dict()}
+    return _process_node(ironic, node, node_info, cached_node)


 def _run_post_hooks(node, ports, node_info):
@@ -102,35 +101,46 @@ def _process_node(ironic, node, node_info, cached_node):

     firewall.update_filters(ironic)

-    if conf.getboolean('discoverd', 'power_off_after_discovery'):
-        LOG.info('Forcing power off of node %s', node.uuid)
-        try:
-            ironic.node.set_power_state(node.uuid, 'off')
-        except Exception as exc:
-            LOG.error('Failed to power off node %s, check it\'s power '
-                      'management configuration:\n%s', node.uuid, exc)
-            raise utils.DiscoveryFailed('Failed to power off node %s' %
-                                        node.uuid)
-
-    eventlet.greenthread.spawn_n(_wait_for_power_off, ironic, cached_node)
-
-    return node
+    if node.extra.get('ipmi_setup_credentials'):
+        eventlet.greenthread.spawn_n(_wait_for_power_management,
+                                     ironic, cached_node)
+        return {'ipmi_setup_credentials': True,
+                'ipmi_username': node.driver_info.get('ipmi_username'),
+                'ipmi_password': node.driver_info.get('ipmi_password')}
+    else:
+        _finish_discovery(ironic, cached_node)
+        return {}


-def _wait_for_power_off(ironic, cached_node):
+def _wait_for_power_management(ironic, cached_node):
     deadline = cached_node.started_at + conf.getint('discoverd', 'timeout')
-    # NOTE(dtantsur): even VM's don't power off instantly, sleep first
     while time.time() < deadline:
-        eventlet.greenthread.sleep(_POWER_OFF_CHECK_PERIOD)
-        node = ironic.node.get(cached_node.uuid)
-        if (node.power_state or 'power off').lower() == 'power off':
-            _finish_discovery(ironic, node)
+        eventlet.greenthread.sleep(_POWER_CHECK_PERIOD)
+        validation = ironic.node.validate(cached_node.uuid)
+        if validation.power['result']:
+            _finish_discovery(ironic, cached_node)
             return
+        LOG.debug('Waiting for management credentials on node %s '
+                  'to be updated, current error: %s',
+                  cached_node.uuid, validation.power['reason'])

-    LOG.error('Timeout waiting for power off state of node %s after discovery',
-              cached_node.uuid)
+    LOG.error('Timeout waiting for power credentials update of node %s '
+              'after discovery', cached_node.uuid)


+def _force_power_off(ironic, node):
+    LOG.debug('Forcing power off of node %s', node.uuid)
+    try:
+        ironic.node.set_power_state(node.uuid, 'off')
+    except Exception as exc:
+        LOG.error('Failed to power off node %s, check its power '
+                  'management configuration:\n%s', node.uuid, exc)
+        raise utils.DiscoveryFailed('Failed to power off node %s' % node.uuid)
+
+
 def _finish_discovery(ironic, node):
+    _force_power_off(ironic, node)
+
     patch = [{'op': 'add', 'path': '/extra/newly_discovered', 'value': 'true'},
              {'op': 'remove', 'path': '/extra/on_discovery'}]
     ironic.node.update(node.uuid, patch)
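
A sketch of how a node might be flagged before discovery so that this
path triggers, reusing the JSON-patch style visible above (client setup
is assumed and elided):

    # Mark the Ironic node so that _process_node() hands the desired
    # IPMI credentials back to the ramdisk instead of powering off.
    patch = [{'op': 'add',
              'path': '/extra/ipmi_setup_credentials',
              'value': True}]
    ironic.node.update(node_uuid, patch)

_wait_for_power_management() then polls node validation until the power
interface passes, i.e. until the ramdisk has actually applied the new
credentials, and only then finishes discovery and powers the node off.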

@@ -58,7 +58,7 @@ class BaseTest(test_base.NodeTest):
 class TestProcess(BaseTest):
     def setUp(self):
         super(TestProcess, self).setUp()
-        self.fake_node_json = 'node json'
+        self.fake_result_json = 'node json'

     def prepate_mocks(func):
         @functools.wraps(func)
@@ -69,8 +69,7 @@ class TestProcess(BaseTest):
                                               started_at=self.started_at)
             cli.port.create.side_effect = self.ports
             cli.node.get.return_value = self.node
-            process_mock.return_value.to_dict.return_value = (
-                self.fake_node_json)
+            process_mock.return_value = self.fake_result_json

             return func(self, cli, pop_mock, process_mock)
@@ -80,7 +79,7 @@ class TestProcess(BaseTest):
     def test_ok(self, cli, pop_mock, process_mock):
         res = process.process(self.data)

-        self.assertEqual({'node': self.fake_node_json}, res)
+        self.assertEqual(self.fake_result_json, res)

         # By default interfaces w/o IP are dropped
         self.assertEqual(['em1', 'em2'], sorted(self.data['interfaces']))
@@ -95,9 +94,7 @@ class TestProcess(BaseTest):
     @prepate_mocks
     def test_no_ipmi(self, cli, pop_mock, process_mock):
         del self.data['ipmi_address']
-        res = process.process(self.data)
-
-        self.assertEqual({'node': self.fake_node_json}, res)
+        process.process(self.data)

         pop_mock.assert_called_once_with(bmc_address=None,
                                          mac=self.data['macs'])
@@ -109,9 +106,7 @@
     def test_deprecated_macs(self, cli, pop_mock, process_mock):
         del self.data['interfaces']
         self.data['macs'] = self.macs
-        res = process.process(self.data)
-
-        self.assertEqual({'node': self.fake_node_json}, res)
+        process.process(self.data)

         self.assertEqual(self.macs, sorted(i['mac'] for i in
                                            self.data['interfaces'].values()))
@@ -126,9 +121,7 @@
     @prepate_mocks
     def test_ports_for_inactive(self, cli, pop_mock, process_mock):
         conf.CONF.set('discoverd', 'ports_for_inactive_interfaces', 'true')
-        res = process.process(self.data)
-
-        self.assertEqual({'node': self.fake_node_json}, res)
+        process.process(self.data)

         self.assertEqual(['em1', 'em2', 'em3'],
                          sorted(self.data['interfaces']))
@@ -208,10 +201,10 @@
         super(TestProcessNode, self).setUp()
         conf.CONF.set('discoverd', 'processing_hooks',
                       'ramdisk_error,scheduler,validate_interfaces,example')
+        self.validate_attempts = 5
         self.data['macs'] = self.macs  # validate_interfaces hook
         self.cached_node = node_cache.NodeInfo(uuid=self.uuid,
                                                started_at=self.started_at)
-        self.power_off_repeats = 5
         self.patch_before = [
             {'op': 'add', 'path': '/properties/cpus', 'value': '2'},
             {'op': 'add', 'path': '/properties/memory_mb', 'value': '1024'},
@@ -222,16 +215,15 @@
         ]

         self.cli = mock.Mock()
-        self.cli.node.get.side_effect = self.fake_get()
+        self.cli.node.validate.side_effect = self.fake_validate()
         self.cli.port.create.side_effect = self.ports
         self.cli.node.update.return_value = self.node

-    def fake_get(self):
-        # Simulate long power off
-        for _ in range(self.power_off_repeats):
-            yield self.node
-        self.node.power_state = 'power off'
-        yield self.node
+    def fake_validate(self):
+        # Simulate long ramdisk task
+        for _ in range(self.validate_attempts):
+            yield mock.Mock(power={'result': False, 'reason': 'boom!'})
+        yield mock.Mock(power={'result': True})

     def call(self):
         return process._process_node(self.cli, self.node, self.data,
@@ -240,16 +232,14 @@
     def test_ok(self, filters_mock, post_hook_mock):
         self.call()

-        self.cli.node.get.assert_called_with(self.uuid)
-        self.assertEqual(self.power_off_repeats + 1,
-                         self.cli.node.get.call_count)
         self.cli.port.create.assert_any_call(node_uuid=self.uuid,
                                              address=self.macs[0])
         self.cli.port.create.assert_any_call(node_uuid=self.uuid,
                                              address=self.macs[1])
         self.cli.node.update.assert_any_call(self.uuid, self.patch_before)
         self.cli.node.update.assert_any_call(self.uuid, self.patch_after)
-        self.assertFalse(self.cli.node.set_power_state.called)
+        self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
+        self.assertFalse(self.cli.node.validate.called)
         post_hook_mock.assert_called_once_with(self.node, mock.ANY,
                                                self.data)
@@ -286,25 +276,30 @@
         self.cli.port.update.assert_called_once_with(self.ports[1].uuid,
                                                      port_patch)

+    def test_ipmi_setup_credentials(self, filters_mock, post_hook_mock):
+        self.node.extra['ipmi_setup_credentials'] = True
+
+        self.call()
+
+        self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
+        self.cli.node.validate.assert_called_with(self.uuid)
+        self.assertEqual(self.validate_attempts + 1,
+                         self.cli.node.validate.call_count)
+
     @mock.patch.object(time, 'time')
-    def test_power_timeout(self, time_mock, filters_mock, post_hook_mock):
+    def test_ipmi_setup_credentials_timeout(self, time_mock, filters_mock,
+                                            post_hook_mock):
         conf.CONF.set('discoverd', 'timeout', '100')
+        self.node.extra['ipmi_setup_credentials'] = True
         time_mock.return_value = self.started_at + 1000

         self.call()

         self.cli.node.update.assert_called_once_with(self.uuid,
                                                      self.patch_before)
         self.assertFalse(self.cli.node.set_power_state.called)

-    def test_force_power_off(self, filters_mock, post_hook_mock):
-        conf.CONF.set('discoverd', 'power_off_after_discovery', 'true')
-
-        self.call()
-
-        self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
-
-    def test_force_power_off_failed(self, filters_mock, post_hook_mock):
-        conf.CONF.set('discoverd', 'power_off_after_discovery', 'true')
+    def test_power_off_failed(self, filters_mock, post_hook_mock):
         self.cli.node.set_power_state.side_effect = exceptions.Conflict()

         self.assertRaisesRegexp(utils.DiscoveryFailed, 'Failed to power off',