[Sahara] Add support for proxy node

The Proxy Node feature makes it possible to use a single Floating IP for
an entire Sahara Cluster. This allows provisioning large clusters in
environments where the number of available Floating IPs is limited.

A boolean enable_proxy parameter is available for both the Sahara Cluster
benchmark and the Sahara Cluster context. The default value is False.

The feature is supported by the Sahara Kilo release and newer.
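
For illustration, a Rally task entry using the new flag could look like the
sketch below. Only the enable_proxy flag (and its False default) comes from
this change; the scenario name, the context keys and every other value are
illustrative assumptions, not part of this commit.

# A minimal sketch of a Rally task dict enabling the proxy node feature.
# Assumption: the scenario name and all values except "enable_proxy" are
# illustrative placeholders.
task = {
    "SaharaClusters.create_and_delete_cluster": [{
        "args": {
            "flavor": {"name": "m1.small"},       # illustrative
            "plugin_name": "vanilla",             # illustrative
            "hadoop_version": "2.6.0",            # illustrative
            "workers_count": 40,                  # illustrative
            "enable_proxy": True                  # new boolean, defaults to False
        },
        "runner": {"type": "constant", "times": 1, "concurrency": 1},
        "context": {"users": {"tenants": 1, "users_per_tenant": 1}}
    }]
}
# The Sahara Cluster context (shown as "sahara_cluster" here, an assumed key
# name) accepts the same enable_proxy boolean.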

Change-Id: I479893290de140737a5e0ec5dbc19ea211f21c00
Author: Nikita Konovalov
Date: 2015-08-12 16:48:14 +03:00
Committed by: Pavel Boldin
Parent: eb32abef98
Commit: 8dc2ce967d
6 changed files with 179 additions and 13 deletions

@@ -79,6 +79,9 @@ class SaharaCluster(context.Context):
},
"enable_anti_affinity": {
"type": "boolean"
},
"enable_proxy": {
"type": "boolean"
}
},
"additionalProperties": False,
@@ -121,6 +124,7 @@ class SaharaCluster(context.Context):
cluster_configs=self.config.get("cluster_configs"),
enable_anti_affinity=self.config.get("enable_anti_affinity",
False),
enable_proxy=self.config.get("enable_proxy", False),
wait_active=False
)

@@ -41,7 +41,8 @@ class SaharaClusters(utils.SaharaScenario):
volumes_size=None, auto_security_group=None,
security_groups=None, node_configs=None,
cluster_configs=None,
enable_anti_affinity=False):
enable_anti_affinity=False,
enable_proxy=False):
"""Launch and delete a Sahara Cluster.
This scenario launches a Hadoop cluster, waits until it becomes
@@ -74,6 +75,8 @@ class SaharaClusters(utils.SaharaScenario):
Cluster
:param enable_anti_affinity: If set to true the vms will be scheduled
one per compute node.
:param enable_proxy: Use Master Node of a Cluster as a Proxy node and
do not assign floating ips to workers.
"""
image_id = self.context["tenant"]["sahara_image"]
@@ -93,7 +96,8 @@ class SaharaClusters(utils.SaharaScenario):
security_groups=security_groups,
node_configs=node_configs,
cluster_configs=cluster_configs,
enable_anti_affinity=enable_anti_affinity)
enable_anti_affinity=enable_anti_affinity,
enable_proxy=enable_proxy)
self._delete_cluster(cluster)
@@ -110,7 +114,8 @@ class SaharaClusters(utils.SaharaScenario):
auto_security_group=None,
security_groups=None, node_configs=None,
cluster_configs=None,
enable_anti_affinity=False):
enable_anti_affinity=False,
enable_proxy=False):
"""Launch, scale and delete a Sahara Cluster.
This scenario launches a Hadoop cluster, waits until it becomes
@@ -151,6 +156,8 @@ class SaharaClusters(utils.SaharaScenario):
Cluster
:param enable_anti_affinity: If set to true the vms will be scheduled
one per compute node.
:param enable_proxy: Use Master Node of a Cluster as a Proxy node and
do not assign floating ips to workers.
"""
image_id = self.context["tenant"]["sahara_image"]
@@ -170,7 +177,8 @@ class SaharaClusters(utils.SaharaScenario):
security_groups=security_groups,
node_configs=node_configs,
cluster_configs=cluster_configs,
enable_anti_affinity=enable_anti_affinity)
enable_anti_affinity=enable_anti_affinity,
enable_proxy=enable_proxy)
for delta in deltas:
# The Cluster is fetched every time so that its node groups have

@@ -46,7 +46,9 @@ SAHARA_BENCHMARK_OPTS = [
help="A timeout in seconds for a Job Execution to complete"),
cfg.IntOpt("sahara_job_check_interval", default=5,
deprecated_name="job_check_interval",
help="Job Execution status polling interval in seconds")
help="Job Execution status polling interval in seconds"),
cfg.IntOpt("sahara_workers_per_proxy", default=20,
help="Amount of workers one proxy should serve to.")
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
@@ -160,7 +162,8 @@ class SaharaScenario(scenario.OpenStackScenario):
"instances to be unreachable.")
return None
def _setup_floating_ip_pool(self, node_groups, floating_ip_pool):
def _setup_floating_ip_pool(self, node_groups, floating_ip_pool,
enable_proxy):
if consts.Service.NEUTRON in self.clients("services").values():
LOG.debug("Neutron detected as networking backend.")
floating_ip_pool_value = self._setup_neutron_floating_ip_pool(
@@ -173,8 +176,18 @@ class SaharaScenario(scenario.OpenStackScenario):
if floating_ip_pool_value:
LOG.debug("Using floating ip pool %s." % floating_ip_pool_value)
# If the pool is set by any means assign it to all node groups.
for ng in node_groups:
ng["floating_ip_pool"] = floating_ip_pool_value
# If the proxy node feature is enabled, Master Node Group and
# Proxy Workers should have a floating ip pool set up
if enable_proxy:
proxy_groups = [x for x in node_groups
if x["name"] in ("master-ng", "proxy-ng")]
for ng in proxy_groups:
ng["is_proxy_gateway"] = True
ng["floating_ip_pool"] = floating_ip_pool_value
else:
for ng in node_groups:
ng["floating_ip_pool"] = floating_ip_pool_value
return node_groups
@@ -230,6 +243,7 @@ class SaharaScenario(scenario.OpenStackScenario):
volumes_size=None, auto_security_group=None,
security_groups=None, node_configs=None,
cluster_configs=None, enable_anti_affinity=False,
enable_proxy=False,
wait_active=True):
"""Create a cluster and wait until it becomes Active.
@@ -261,9 +275,18 @@ class SaharaScenario(scenario.OpenStackScenario):
Cluster
:param enable_anti_affinity: If set to true the vms will be scheduled
one per compute node.
:param enable_proxy: Use Master Node of a Cluster as a Proxy node and
do not assign floating ips to workers.
:param wait_active: Wait until a Cluster gets int "Active" state
:returns: created cluster
"""
if enable_proxy:
proxies_count = int(
workers_count / CONF.benchmark.sahara_workers_per_proxy)
else:
proxies_count = 0
node_groups = [
{
"name": "master-ng",
@@ -276,10 +299,19 @@ class SaharaScenario(scenario.OpenStackScenario):
"flavor_id": flavor_id,
"node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["worker"],
"count": workers_count
"count": workers_count - proxies_count
}
]
if proxies_count:
node_groups.append({
"name": "proxy-ng",
"flavor_id": flavor_id,
"node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["worker"],
"count": proxies_count
})
if "manager" in (sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]):
# Adding manager group separately as it is supported only in
@@ -294,7 +326,8 @@ class SaharaScenario(scenario.OpenStackScenario):
})
node_groups = self._setup_floating_ip_pool(node_groups,
floating_ip_pool)
floating_ip_pool,
enable_proxy)
neutron_net_id = self._get_neutron_net_id()
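
As a quick sanity check of the sizing above (a sketch, assuming the default
sahara_workers_per_proxy of 20 and the 42 workers requested in the unit test
below):

# Proxy sizing as computed in _launch_cluster, with the default
# sahara_workers_per_proxy of 20.
workers_count = 42
proxies_count = int(workers_count / 20)        # 2 nodes in "proxy-ng"
plain_workers = workers_count - proxies_count  # 40 nodes left in "worker-ng"
# Together with the single "master-ng" node this matches the node group
# counts expected by test_launch_cluster_with_proxy below.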

@@ -89,6 +89,7 @@ class SaharaClusterTestCase(test.ScenarioTestCase):
node_configs=None,
cluster_configs=None,
enable_anti_affinity=False,
enable_proxy=False,
wait_active=False
))

@@ -56,7 +56,8 @@ class SaharaClustersTestCase(test.ScenarioTestCase):
security_groups=None,
node_configs=None,
cluster_configs=None,
enable_anti_affinity=False)
enable_anti_affinity=False,
enable_proxy=False)
mock__delete_cluster.assert_called_once_with(
mock__launch_cluster.return_value)
@@ -99,7 +100,8 @@ class SaharaClustersTestCase(test.ScenarioTestCase):
security_groups=None,
node_configs=None,
cluster_configs=None,
enable_anti_affinity=False)
enable_anti_affinity=False,
enable_proxy=False)
mock__scale_cluster.assert_has_calls([
mock.call(self.clients("sahara").clusters.get.return_value, 1),

@@ -192,6 +192,123 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
mock_sahara_consts.NODE_PROCESSES = mock_processes
mock_sahara_consts.REPLICATION_CONFIGS = mock_configs
self.clients("sahara").clusters.create.return_value.id = (
"test_cluster_id")
self.clients("sahara").clusters.get.return_value.status = (
"active")
scenario._launch_cluster(
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
image_id="test_image",
floating_ip_pool=floating_ip_pool_uuid,
volumes_per_node=5,
volumes_size=10,
auto_security_group=True,
security_groups=["g1", "g2"],
workers_count=42,
node_configs={"HDFS": {"local_config": "local_value"}}
)
self.clients("sahara").clusters.create.assert_called_once_with(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
node_groups=node_groups,
default_image_id="test_image",
cluster_configs={"HDFS": {"dfs.replication": 3}},
net_id="test_neutron_id",
anti_affinity=None
)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.launch_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_launch_cluster_with_proxy(self, mock_sahara_consts,
mock__generate_random_name):
context = {
"tenant": {
"networks": [
{
"id": "test_neutron_id",
"router_id": "test_router_id"
}
]
}
}
self.clients("services").values.return_value = [
consts.Service.NEUTRON
]
scenario = utils.SaharaScenario(context=context)
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_configs = {
"test_plugin": {
"test_version": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
floating_ip_pool_uuid = uuidutils.generate_uuid()
node_groups = [
{
"name": "master-ng",
"flavor_id": "test_flavor",
"node_processes": ["p1"],
"floating_ip_pool": floating_ip_pool_uuid,
"volumes_per_node": 5,
"volumes_size": 10,
"count": 1,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"is_proxy_gateway": True
}, {
"name": "worker-ng",
"flavor_id": "test_flavor",
"node_processes": ["p2"],
"volumes_per_node": 5,
"volumes_size": 10,
"count": 40,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
}, {
"name": "proxy-ng",
"flavor_id": "test_flavor",
"node_processes": ["p2"],
"floating_ip_pool": floating_ip_pool_uuid,
"volumes_per_node": 5,
"volumes_size": 10,
"count": 2,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"is_proxy_gateway": True
}
]
mock_sahara_consts.NODE_PROCESSES = mock_processes
mock_sahara_consts.REPLICATION_CONFIGS = mock_configs
self.clients("sahara").clusters.create.return_value = mock.MagicMock(
id="test_cluster_id")
@@ -209,7 +326,8 @@ class SaharaScenarioTestCase(test.ScenarioTestCase):
auto_security_group=True,
security_groups=["g1", "g2"],
workers_count=42,
node_configs={"HDFS": {"local_config": "local_value"}}
node_configs={"HDFS": {"local_config": "local_value"}},
enable_proxy=True
)
self.clients("sahara").clusters.create.assert_called_once_with(