Merge "Added auto-configure for node group"
This commit is contained in:
commit
ec7995d870
@@ -88,7 +88,10 @@ class SaharaCluster(context.Context):
             },
             "enable_proxy": {
                 "type": "boolean"
-            }
+            },
+            "use_autoconfig": {
+                "type": "boolean"
+            },
         },
         "additionalProperties": False,
         "required": ["plugin_name", "hadoop_version", "workers_count",
@@ -135,7 +138,8 @@ class SaharaCluster(context.Context):
                 enable_anti_affinity=self.config.get("enable_anti_affinity",
                                                      False),
                 enable_proxy=self.config.get("enable_proxy", False),
-                wait_active=False
+                wait_active=False,
+                use_autoconfig=self.config.get("use_autoconfig", True)
             )

             self.context["tenants"][tenant_id]["sahara"]["cluster"] = (

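The new "use_autoconfig" schema entry above is read from a task file's context section. A minimal sketch of a task context exercising it, written as a Python literal that mirrors the task JSON (the plugin, version, and worker values are illustrative, and the truncated "required" list above implies further mandatory keys that are omitted here):

    context = {
        "sahara_cluster": {
            "plugin_name": "vanilla",     # illustrative value
            "hadoop_version": "2.7.1",    # illustrative value
            "workers_count": 3,
            "use_autoconfig": False,      # the new option; True when omitted
        }
    }
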
@@ -48,7 +48,8 @@ class SaharaClusters(utils.SaharaScenario):
                                   security_groups=None, node_configs=None,
                                   cluster_configs=None,
                                   enable_anti_affinity=False,
-                                  enable_proxy=False):
+                                  enable_proxy=False,
+                                  use_autoconfig=True):
         """Launch and delete a Sahara Cluster.

         This scenario launches a Hadoop cluster, waits until it becomes
@@ -87,6 +88,10 @@ class SaharaClusters(utils.SaharaScenario):
                                 one per compute node.
         :param enable_proxy: Use Master Node of a Cluster as a Proxy node and
                              do not assign floating ips to workers.
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         """

         image_id = self.context["tenant"]["sahara"]["image"]
@@ -109,7 +114,8 @@ class SaharaClusters(utils.SaharaScenario):
                                        node_configs=node_configs,
                                        cluster_configs=cluster_configs,
                                        enable_anti_affinity=enable_anti_affinity,
-                                       enable_proxy=enable_proxy)
+                                       enable_proxy=enable_proxy,
+                                       use_autoconfig=use_autoconfig)

         self._delete_cluster(cluster)

@@ -132,7 +138,8 @@ class SaharaClusters(utils.SaharaScenario):
                                   security_groups=None, node_configs=None,
                                   cluster_configs=None,
                                   enable_anti_affinity=False,
-                                  enable_proxy=False):
+                                  enable_proxy=False,
+                                  use_autoconfig=True):
         """Launch, scale and delete a Sahara Cluster.

         This scenario launches a Hadoop cluster, waits until it becomes
@@ -179,6 +186,10 @@ class SaharaClusters(utils.SaharaScenario):
                                 one per compute node.
         :param enable_proxy: Use Master Node of a Cluster as a Proxy node and
                              do not assign floating ips to workers.
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         """

         image_id = self.context["tenant"]["sahara"]["image"]
@@ -201,7 +212,8 @@ class SaharaClusters(utils.SaharaScenario):
                                        node_configs=node_configs,
                                        cluster_configs=cluster_configs,
                                        enable_anti_affinity=enable_anti_affinity,
-                                       enable_proxy=enable_proxy)
+                                       enable_proxy=enable_proxy,
+                                       use_autoconfig=use_autoconfig)

         for delta in deltas:
             # The Cluster is fetched every time so that its node groups have

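A task invoking either scenario above can now pass the flag through its "args" section. A minimal sketch, again as a Python literal standing in for the task JSON (this assumes the scenario's usual leading arguments flavor, workers_count, plugin_name, and hadoop_version; all values are illustrative):

    task = {
        "SaharaClusters.create_and_delete_cluster": [{
            "args": {
                "flavor": "2",                # illustrative flavor id
                "workers_count": 3,
                "plugin_name": "vanilla",     # illustrative value
                "hadoop_version": "2.7.1",    # illustrative value
                "use_autoconfig": False,      # forwarded to _launch_cluster()
            },
            "runner": {"type": "constant", "times": 1, "concurrency": 1},
        }]
    }
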
@@ -30,7 +30,8 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
     @scenario.configure(context={"cleanup": ["sahara"]})
     def create_and_list_node_group_templates(self, flavor,
                                              plugin_name="vanilla",
-                                             hadoop_version="1.2.1"):
+                                             hadoop_version="1.2.1",
+                                             use_autoconfig=True):
         """Create and list Sahara Node Group Templates.

         This scenario creates two Node Group Templates with different set of
@@ -48,14 +49,20 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
         :param plugin_name: name of a provisioning plugin
         :param hadoop_version: version of Hadoop distribution supported by
                                the specified plugin.
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         """

         self._create_master_node_group_template(flavor_id=flavor,
                                                 plugin_name=plugin_name,
-                                                hadoop_version=hadoop_version)
+                                                hadoop_version=hadoop_version,
+                                                use_autoconfig=use_autoconfig)
         self._create_worker_node_group_template(flavor_id=flavor,
                                                 plugin_name=plugin_name,
-                                                hadoop_version=hadoop_version)
+                                                hadoop_version=hadoop_version,
+                                                use_autoconfig=use_autoconfig)
         self._list_node_group_templates()

     @types.convert(flavor={"type": "nova_flavor"})
@@ -65,7 +72,8 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
     @scenario.configure(context={"cleanup": ["sahara"]})
     def create_delete_node_group_templates(self, flavor,
                                            plugin_name="vanilla",
-                                           hadoop_version="1.2.1"):
+                                           hadoop_version="1.2.1",
+                                           use_autoconfig=True):
         """Create and delete Sahara Node Group Templates.

         This scenario creates and deletes two most common types of
@@ -79,17 +87,23 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
         :param plugin_name: name of a provisioning plugin
         :param hadoop_version: version of Hadoop distribution supported by
                                the specified plugin.
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         """

         master_ngt = self._create_master_node_group_template(
             flavor_id=flavor,
             plugin_name=plugin_name,
-            hadoop_version=hadoop_version)
+            hadoop_version=hadoop_version,
+            use_autoconfig=use_autoconfig)

         worker_ngt = self._create_worker_node_group_template(
             flavor_id=flavor,
             plugin_name=plugin_name,
-            hadoop_version=hadoop_version)
+            hadoop_version=hadoop_version,
+            use_autoconfig=use_autoconfig)

         self._delete_node_group_template(master_ngt)
         self._delete_node_group_template(worker_ngt)

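Both template scenarios apply the flag to the master and worker templates alike. A hypothetical direct call, assuming an already initialized scenario object (construction and credentials are elided):

    scenario = SaharaNodeGroupTemplates()    # hypothetical; setup omitted
    scenario.create_and_list_node_group_templates(
        flavor="2",                          # illustrative flavor id
        plugin_name="vanilla",
        hadoop_version="1.2.1",
        use_autoconfig=False,                # applied to both templates
    )
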
@@ -70,13 +70,18 @@ class SaharaScenario(scenario.OpenStackScenario):

     @atomic.action_timer("sahara.create_master_node_group_template")
     def _create_master_node_group_template(self, flavor_id, plugin_name,
-                                           hadoop_version):
+                                           hadoop_version,
+                                           use_autoconfig=True):
         """Create a master Node Group Template with a random name.

         :param flavor_id: The required argument for the Template
         :param plugin_name: Sahara provisioning plugin name
         :param hadoop_version: The version of Hadoop distribution supported by
                                the plugin
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         :returns: The created Template
         """
         name = self.generate_random_name()
@@ -87,17 +92,22 @@ class SaharaScenario(scenario.OpenStackScenario):
             hadoop_version=hadoop_version,
             flavor_id=flavor_id,
             node_processes=sahara_consts.NODE_PROCESSES[plugin_name]
-            [hadoop_version]["master"])
+            [hadoop_version]["master"],
+            use_autoconfig=use_autoconfig)

     @atomic.action_timer("sahara.create_worker_node_group_template")
     def _create_worker_node_group_template(self, flavor_id, plugin_name,
-                                           hadoop_version):
+                                           hadoop_version, use_autoconfig):
         """Create a worker Node Group Template with a random name.

         :param flavor_id: The required argument for the Template
         :param plugin_name: Sahara provisioning plugin name
         :param hadoop_version: The version of Hadoop distribution supported by
                                the plugin
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         :returns: The created Template
         """
         name = self.generate_random_name()
@@ -108,7 +118,8 @@ class SaharaScenario(scenario.OpenStackScenario):
             hadoop_version=hadoop_version,
             flavor_id=flavor_id,
             node_processes=sahara_consts.NODE_PROCESSES[plugin_name]
-            [hadoop_version]["worker"])
+            [hadoop_version]["worker"],
+            use_autoconfig=use_autoconfig)

     @atomic.action_timer("sahara.delete_node_group_template")
     def _delete_node_group_template(self, node_group):
@@ -231,6 +242,13 @@ class SaharaScenario(scenario.OpenStackScenario):

         return node_groups

+    def _setup_node_autoconfig(self, node_groups, node_autoconfig):
+        LOG.debug("Adding auto-config param to Node Groups")
+        for ng in node_groups:
+            ng["use_autoconfig"] = node_autoconfig
+
+        return node_groups
+
     def _setup_replication_config(self, hadoop_version, workers_count,
                                   plugin_name):
         replication_value = min(workers_count, 3)
@@ -257,7 +275,8 @@ class SaharaScenario(scenario.OpenStackScenario):
                         security_groups=None, node_configs=None,
                         cluster_configs=None, enable_anti_affinity=False,
                         enable_proxy=False,
-                        wait_active=True):
+                        wait_active=True,
+                        use_autoconfig=True):
         """Create a cluster and wait until it becomes Active.

         The cluster is created with two node groups. The master Node Group is
@@ -293,6 +312,10 @@ class SaharaScenario(scenario.OpenStackScenario):
         :param enable_proxy: Use Master Node of a Cluster as a Proxy node and
                              do not assign floating ips to workers.
         :param wait_active: Wait until a Cluster gets into "Active" state
+        :param use_autoconfig: If True, instances of the node group will be
+                               automatically configured during cluster
+                               creation. If False, the configuration values
+                               should be specified manually.
         :returns: created cluster
         """

@@ -361,6 +384,8 @@ class SaharaScenario(scenario.OpenStackScenario):

         node_groups = self._setup_node_configs(node_groups, node_configs)

+        node_groups = self._setup_node_autoconfig(node_groups, use_autoconfig)
+
         replication_config = self._setup_replication_config(hadoop_version,
                                                             workers_count,
                                                             plugin_name)
@@ -385,7 +410,8 @@ class SaharaScenario(scenario.OpenStackScenario):
             default_image_id=image_id,
             net_id=neutron_net_id,
             cluster_configs=merged_cluster_configs,
-            anti_affinity=aa_processes
+            anti_affinity=aa_processes,
+            use_autoconfig=use_autoconfig
         )

         if wait_active:

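The new _setup_node_autoconfig helper simply stamps the same flag onto every node-group dict before the cluster is created. A self-contained restatement for illustration (a standalone function, not the Rally method itself):

    def setup_node_autoconfig(node_groups, node_autoconfig):
        # Tag every node group with the same use_autoconfig flag.
        for ng in node_groups:
            ng["use_autoconfig"] = node_autoconfig
        return node_groups

    groups = [{"name": "master-ng"}, {"name": "worker-ng"}]
    groups = setup_node_autoconfig(groups, True)
    assert all(ng["use_autoconfig"] for ng in groups)
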
@@ -94,7 +94,8 @@ class SaharaClusterTestCase(test.ScenarioTestCase):
             cluster_configs=None,
             enable_anti_affinity=False,
             enable_proxy=False,
-            wait_active=False
+            wait_active=False,
+            use_autoconfig=True
         ))

         self.clients("sahara").clusters.get.side_effect = [
@@ -132,7 +133,8 @@ class SaharaClusterTestCase(test.ScenarioTestCase):
             security_groups=None,
             node_configs=None,
             cluster_configs=None,
-            wait_active=False
+            wait_active=False,
+            use_autoconfig=True
         ))

         self.clients("sahara").clusters.get.side_effect = [

@@ -62,7 +62,8 @@ class SaharaClustersTestCase(test.ScenarioTestCase):
             node_configs=None,
             cluster_configs=None,
             enable_anti_affinity=False,
-            enable_proxy=False)
+            enable_proxy=False,
+            use_autoconfig=True)

         mock__delete_cluster.assert_called_once_with(
             mock__launch_cluster.return_value)
@@ -105,7 +106,8 @@ class SaharaClustersTestCase(test.ScenarioTestCase):
             node_configs=None,
             cluster_configs=None,
             enable_anti_affinity=False,
-            enable_proxy=False)
+            enable_proxy=False,
+            use_autoconfig=True)

         mock__delete_cluster.assert_called_once_with(
             mock__launch_cluster.return_value)
@@ -154,7 +156,8 @@ class SaharaClustersTestCase(test.ScenarioTestCase):
             node_configs=None,
             cluster_configs=None,
             enable_anti_affinity=False,
-            enable_proxy=False)
+            enable_proxy=False,
+            use_autoconfig=True)

         mock__scale_cluster.assert_has_calls([
             mock.call(self.clients("sahara").clusters.get.return_value, 1),

@@ -48,11 +48,13 @@ class SaharaNodeGroupTemplatesTestCase(test.TestCase):
         mock__create_master_node_group_template.assert_called_once_with(
             flavor_id="test_flavor",
             plugin_name="test_plugin",
-            hadoop_version="test_version")
+            hadoop_version="test_version",
+            use_autoconfig=True)
         mock__create_worker_node_group_template.assert_called_once_with(
             flavor_id="test_flavor",
             plugin_name="test_plugin",
-            hadoop_version="test_version")
+            hadoop_version="test_version",
+            use_autoconfig=True)
         mock__list_node_group_templates.assert_called_once_with()

     @mock.patch(SAHARA_NGTS + "._delete_node_group_template")
@@ -75,11 +77,13 @@ class SaharaNodeGroupTemplatesTestCase(test.TestCase):
         mock__create_master_node_group_template.assert_called_once_with(
             flavor_id="test_flavor",
             plugin_name="test_plugin",
-            hadoop_version="test_version")
+            hadoop_version="test_version",
+            use_autoconfig=True)
         mock__create_worker_node_group_template.assert_called_once_with(
             flavor_id="test_flavor",
             plugin_name="test_plugin",
-            hadoop_version="test_version")
+            hadoop_version="test_version",
+            use_autoconfig=True)

         mock__delete_node_group_template.assert_has_calls(calls=[
             mock.call(mock__create_master_node_group_template.return_value),

@@ -79,12 +79,14 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
         scenario._create_master_node_group_template(
             flavor_id="test_flavor",
             plugin_name="test_plugin",
-            hadoop_version="test_version"
+            hadoop_version="test_version",
+            use_autoconfig=True
         )
         scenario._create_worker_node_group_template(
             flavor_id="test_flavor",
             plugin_name="test_plugin",
-            hadoop_version="test_version"
+            hadoop_version="test_version",
+            use_autoconfig=True
         )

         create_calls = [
@@ -93,13 +95,15 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
                 plugin_name="test_plugin",
                 hadoop_version="test_version",
                 flavor_id="test_flavor",
-                node_processes=["p1"]),
+                node_processes=["p1"],
+                use_autoconfig=True),
             mock.call(
                 name="random_name",
                 plugin_name="test_plugin",
                 hadoop_version="test_version",
                 flavor_id="test_flavor",
-                node_processes=["p2"]
+                node_processes=["p2"],
+                use_autoconfig=True
             )]
         self.clients("sahara").node_group_templates.create.assert_has_calls(
             create_calls)
@@ -175,6 +179,7 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
                 "auto_security_group": True,
                 "security_groups": ["g1", "g2"],
                 "node_configs": {"HDFS": {"local_config": "local_value"}},
+                "use_autoconfig": True,
             }, {
                 "name": "worker-ng",
                 "flavor_id": "test_flavor_w",
@@ -186,6 +191,7 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
                 "auto_security_group": True,
                 "security_groups": ["g1", "g2"],
                 "node_configs": {"HDFS": {"local_config": "local_value"}},
+                "use_autoconfig": True,
             }
         ]

@@ -210,7 +216,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
             auto_security_group=True,
             security_groups=["g1", "g2"],
             workers_count=42,
-            node_configs={"HDFS": {"local_config": "local_value"}}
+            node_configs={"HDFS": {"local_config": "local_value"}},
+            use_autoconfig=True
         )

         self.clients("sahara").clusters.create.assert_called_once_with(
@@ -221,7 +228,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
             default_image_id="test_image",
             cluster_configs={"HDFS": {"dfs.replication": 3}},
             net_id="test_neutron_id",
-            anti_affinity=None
+            anti_affinity=None,
+            use_autoconfig=True
         )

         self._test_atomic_action_timer(scenario.atomic_actions(),
@@ -279,7 +287,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
                 "auto_security_group": True,
                 "security_groups": ["g1", "g2"],
                 "node_configs": {"HDFS": {"local_config": "local_value"}},
-                "is_proxy_gateway": True
+                "is_proxy_gateway": True,
+                "use_autoconfig": True,
             }, {
                 "name": "worker-ng",
                 "flavor_id": "test_flavor_w",
@@ -290,6 +299,7 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
                 "auto_security_group": True,
                 "security_groups": ["g1", "g2"],
                 "node_configs": {"HDFS": {"local_config": "local_value"}},
+                "use_autoconfig": True,
             }, {
                 "name": "proxy-ng",
                 "flavor_id": "test_flavor_w",
@@ -301,7 +311,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
                 "auto_security_group": True,
                 "security_groups": ["g1", "g2"],
                 "node_configs": {"HDFS": {"local_config": "local_value"}},
-                "is_proxy_gateway": True
+                "is_proxy_gateway": True,
+                "use_autoconfig": True,
             }
         ]

@@ -327,7 +338,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
             security_groups=["g1", "g2"],
             workers_count=42,
             node_configs={"HDFS": {"local_config": "local_value"}},
-            enable_proxy=True
+            enable_proxy=True,
+            use_autoconfig=True
         )

         self.clients("sahara").clusters.create.assert_called_once_with(
@@ -338,7 +350,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
             default_image_id="test_image",
             cluster_configs={"HDFS": {"dfs.replication": 3}},
             net_id="test_neutron_id",
-            anti_affinity=None
+            anti_affinity=None,
+            use_autoconfig=True
         )

         self._test_atomic_action_timer(scenario.atomic_actions(),

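The diff does not show a dedicated unit test for _setup_node_autoconfig itself. A minimal sketch of one, assuming the same SaharaScenarioTestCase setup used above (the constructor call is assumed, not taken from this commit):

    def test_setup_node_autoconfig(self):
        scenario = utils.SaharaScenario(self.context)   # assumed constructor
        node_groups = [{"name": "master-ng"}, {"name": "worker-ng"}]
        node_groups = scenario._setup_node_autoconfig(node_groups, False)
        for ng in node_groups:
            self.assertFalse(ng["use_autoconfig"])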