Passing thread pool size to IPA for parallel erasure
Adds a configuration option [deploy]disk_erasure_concurrency to define the
size of the thread pool and passes it to IPA. The value defines the maximum
number of threads that can be utilized for parallel disk erasure on the IPA
side.

Change-Id: Ie7fe8ac730e7ee161443b39a8c0fe79b2848fc87
Story: 1546949
Task: 26973
commit a83f13b6e8
parent ab1b117ee4
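The IPA-side behaviour is out of scope for this patch; purely as an illustrative
sketch (not IPA's actual code), a bounded erasure pool driven by this value could
look like the following, with erase_one standing in for a hypothetical per-disk
erase callable:

# Illustrative sketch only -- not IPA's actual implementation.
from concurrent.futures import ThreadPoolExecutor


def erase_devices(disks, disk_erasure_concurrency, erase_one):
    """Erase disks with at most disk_erasure_concurrency parallel workers.

    The pool never exceeds the configured value or the number of disks to
    be erased, which is the behaviour the new option documents.
    """
    max_workers = min(disk_erasure_concurrency, len(disks)) or 1
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # pool.map re-raises any per-disk exception when results are consumed.
        return list(pool.map(erase_one, disks))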
@@ -66,6 +66,13 @@ opts = [
                        'node will be put in ``clean failed`` state. '
                        'If True, shred will be invoked and cleaning will '
                        'continue.')),
+    cfg.IntOpt('disk_erasure_concurrency',
+               default=1,
+               min=1,
+               help=_('Defines the target pool size used by Ironic Python '
+                      'Agent ramdisk to erase disk devices. The number of '
+                      'threads created to erase disks will not exceed this '
+                      'value or the number of disks to be erased.')),
     cfg.BoolOpt('power_off_after_deploy_failure',
                 default=True,
                 help=_('Whether to power off a node after deploy failure. '
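For reference, a minimal standalone sketch (assumed for illustration, not
Ironic's conf module) of how an oslo.config IntOpt with default=1 and min=1
behaves once registered; operators would set the same option under the
[deploy] section of ironic.conf:

# Hypothetical standalone example, not ironic/conf code.
from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_opts(
    [cfg.IntOpt('disk_erasure_concurrency',
                default=1,   # backwards-compatible default: one disk at a time
                min=1)],     # values below 1 are rejected by oslo.config
    group='deploy')
conf(args=[])  # parse an empty command line so defaults are materialized

# Until overridden (e.g. "disk_erasure_concurrency = 8" under [deploy] in
# ironic.conf), the option reads back as its default.
print(conf.deploy.disk_erasure_concurrency)  # -> 1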
@@ -660,6 +660,7 @@ def agent_add_clean_params(task):
     info['agent_continue_if_ata_erase_failed'] = erase_fallback
     secure_erase = CONF.deploy.enable_ata_secure_erase
     info['agent_enable_ata_secure_erase'] = secure_erase
+    info['disk_erasure_concurrency'] = CONF.deploy.disk_erasure_concurrency

     task.node.driver_internal_info = info
     task.node.save()
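Illustratively (values assumed to match the test overrides below; keys set
earlier in agent_add_clean_params() are omitted), the node's
driver_internal_info handed to IPA would then include:

# Hypothetical resulting driver_internal_info after agent_add_clean_params():
info = {
    'agent_continue_if_ata_erase_failed': True,
    'agent_enable_ata_secure_erase': False,
    'disk_erasure_concurrency': 8,
    # ... plus the other cleaning parameters set earlier in the function.
}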
@@ -1633,6 +1633,7 @@ class AgentMethodsTestCase(db_base.DbTestCase):
         cfg.CONF.set_override('continue_if_disk_secure_erase_fails', True,
                               'deploy')
         cfg.CONF.set_override('enable_ata_secure_erase', False, 'deploy')
+        cfg.CONF.set_override('disk_erasure_concurrency', 8, 'deploy')
         with task_manager.acquire(
                 self.context, self.node.uuid, shared=False) as task:
             utils.agent_add_clean_params(task)
@@ -1644,6 +1645,8 @@ class AgentMethodsTestCase(db_base.DbTestCase):
             'agent_continue_if_ata_erase_failed'])
         self.assertIs(False, task.node.driver_internal_info[
             'agent_enable_ata_secure_erase'])
+        self.assertEqual(8, task.node.driver_internal_info[
+            'disk_erasure_concurrency'])

     @mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
     @mock.patch('ironic.conductor.utils.node_power_action', autospec=True)
@@ -0,0 +1,9 @@
+---
+features:
+  - Adds a configuration option ``[deploy]disk_erasure_concurrency`` to
+    define the target pool size used by the Ironic Python Agent ramdisk to
+    erase disk devices. The number of threads IPA creates to erase disk
+    devices is the minimum of the target pool size and the number of disks
+    to be erased. This feature can greatly reduce the operation time for
+    bare metal nodes with multiple disks. For backwards compatibility,
+    the default value is 1.
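A small worked example of the sizing rule described in the note above
(illustrative only):

disk_erasure_concurrency = 8                  # [deploy]disk_erasure_concurrency
disks = ['/dev/sda', '/dev/sdb', '/dev/sdc']
workers = min(disk_erasure_concurrency, len(disks))
assert workers == 3                           # only 3 disks, so only 3 threads are used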