[Sahara] Add Cloudera plugin support
Added the Cloudera plugin. The only supported version is 5. Small refactorings: * added a manager role to set up the Cloudera plugin properly * HDP v2 now also has a manager node group * renamed node_count -> workers_count Change-Id: Ia21f50527950ca127dc2d9dc1fedd0a7b4892ab0
This commit is contained in:
@@ -45,9 +45,9 @@ class SaharaCluster(base.Context):
|
||||
"hadoop_version": {
|
||||
"type": "string",
|
||||
},
|
||||
"node_count": {
|
||||
"workers_count": {
|
||||
"type": "integer",
|
||||
"minimum": 2
|
||||
"minimum": 1
|
||||
},
|
||||
"flavor_id": {
|
||||
"type": "string",
|
||||
@@ -80,7 +80,7 @@ class SaharaCluster(base.Context):
|
||||
}
|
||||
},
|
||||
"additionalProperties": False,
|
||||
"required": ["plugin_name", "hadoop_version", "node_count",
|
||||
"required": ["plugin_name", "hadoop_version", "workers_count",
|
||||
"flavor_id"]
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ class SaharaCluster(base.Context):
|
||||
plugin_name=self.config["plugin_name"],
|
||||
hadoop_version=self.config["hadoop_version"],
|
||||
flavor_id=self.config["flavor_id"],
|
||||
node_count=self.config["node_count"],
|
||||
workers_count=self.config["workers_count"],
|
||||
image_id=image_id,
|
||||
floating_ip_pool=floating_ip_pool,
|
||||
volumes_per_node=self.config.get("volumes_per_node"),
|
||||
|
||||
@@ -31,11 +31,11 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
floating_ip_pool=types.NeutronNetworkResourceType)
|
||||
@validation.flavor_exists('flavor')
|
||||
@validation.required_contexts("users", "sahara_image")
|
||||
@validation.number("node_count", minval=2, integer_only=True)
|
||||
@validation.number("workers_count", minval=1, integer_only=True)
|
||||
@validation.required_services(consts.Service.SAHARA)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["sahara"]})
|
||||
def create_and_delete_cluster(self, flavor, node_count, plugin_name,
|
||||
def create_and_delete_cluster(self, flavor, workers_count, plugin_name,
|
||||
hadoop_version, floating_ip_pool=None,
|
||||
volumes_per_node=None,
|
||||
volumes_size=None, auto_security_group=None,
|
||||
@@ -48,7 +48,7 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
|
||||
:param flavor: Nova flavor that will be used for nodes in the
|
||||
created node groups
|
||||
:param node_count: total number of instances in a cluster (>= 2)
|
||||
:param workers_count: number of worker instances in a cluster
|
||||
:param plugin_name: name of a provisioning plugin
|
||||
:param hadoop_version: version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
@@ -80,7 +80,7 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
cluster = self._launch_cluster(
|
||||
flavor_id=flavor,
|
||||
image_id=image_id,
|
||||
node_count=node_count,
|
||||
workers_count=workers_count,
|
||||
plugin_name=plugin_name,
|
||||
hadoop_version=hadoop_version,
|
||||
floating_ip_pool=floating_ip_pool,
|
||||
@@ -97,9 +97,9 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
@validation.flavor_exists('flavor')
|
||||
@validation.required_services(consts.Service.SAHARA)
|
||||
@validation.required_contexts("users", "sahara_image")
|
||||
@validation.number("node_count", minval=2, integer_only=True)
|
||||
@validation.number("workers_count", minval=1, integer_only=True)
|
||||
@base.scenario(context={"cleanup": ["sahara"]})
|
||||
def create_scale_delete_cluster(self, flavor, node_count, plugin_name,
|
||||
def create_scale_delete_cluster(self, flavor, workers_count, plugin_name,
|
||||
hadoop_version, deltas,
|
||||
floating_ip_pool=None,
|
||||
volumes_per_node=None, volumes_size=None,
|
||||
@@ -116,7 +116,7 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
|
||||
:param flavor: Nova flavor that will be used for nodes in the
|
||||
created node groups
|
||||
:param node_count: total number of instances in a cluster (>= 2)
|
||||
:param workers_count: number of worker instances in a cluster
|
||||
:param plugin_name: name of a provisioning plugin
|
||||
:param hadoop_version: version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
@@ -153,7 +153,7 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
cluster = self._launch_cluster(
|
||||
flavor_id=flavor,
|
||||
image_id=image_id,
|
||||
node_count=node_count,
|
||||
workers_count=workers_count,
|
||||
plugin_name=plugin_name,
|
||||
hadoop_version=hadoop_version,
|
||||
floating_ip_pool=floating_ip_pool,
|
||||
|
||||
@@ -75,13 +75,23 @@ class SaharaScenario(base.Scenario):
|
||||
'MAPREDUCE_CLIENT', 'OOZIE_CLIENT', 'PIG']
|
||||
},
|
||||
"2.0.6": {
|
||||
"master": ['NAMENODE', 'SECONDARY_NAMENODE',
|
||||
'ZOOKEEPER_SERVER', 'AMBARI_SERVER',
|
||||
'HISTORYSERVER', 'RESOURCEMANAGER',
|
||||
'GANGLIA_SERVER', 'NAGIOS_SERVER', 'OOZIE_SERVER'],
|
||||
"worker": ['HDFS_CLIENT', 'DATANODE', 'ZOOKEEPER_CLIENT',
|
||||
'MAPREDUCE2_CLIENT', 'YARN_CLIENT', 'NODEMANAGER',
|
||||
'PIG', 'OOZIE_CLIENT']
|
||||
"manager": ["AMBARI_SERVER", "GANGLIA_SERVER",
|
||||
"NAGIOS_SERVER"],
|
||||
"master": ["NAMENODE", "SECONDARY_NAMENODE",
|
||||
"ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT",
|
||||
"HISTORYSERVER", "RESOURCEMANAGER",
|
||||
"OOZIE_SERVER"],
|
||||
"worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT",
|
||||
"PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT",
|
||||
"NODEMANAGER", "OOZIE_CLIENT"]
|
||||
}
|
||||
},
|
||||
"cdh": {
|
||||
"5": {
|
||||
"manager": ["MANAGER"],
|
||||
"master": ["NAMENODE", "SECONDARYNAMENODE", "RESOURCEMANAGER",
|
||||
"JOBHISTORY", "OOZIE_SERVER"],
|
||||
"worker": ["DATANODE", "NODEMANAGER"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -110,6 +120,12 @@ class SaharaScenario(base.Scenario):
|
||||
"target": "HDFS",
|
||||
"config_name": "dfs.replication"
|
||||
}
|
||||
},
|
||||
"cdh": {
|
||||
"5": {
|
||||
"target": "HDFS",
|
||||
"config_name": "dfs_replication"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -265,9 +281,9 @@ class SaharaScenario(base.Scenario):
|
||||
|
||||
return node_groups
|
||||
|
||||
def _setup_replication_config(self, hadoop_version, node_count,
|
||||
def _setup_replication_config(self, hadoop_version, workers_count,
|
||||
plugin_name):
|
||||
replication_value = min(node_count - 1, 3)
|
||||
replication_value = min(workers_count, 3)
|
||||
# 3 is a default Hadoop replication
|
||||
conf = self.REPLICATION_CONFIGS[plugin_name][hadoop_version]
|
||||
LOG.debug("Using replication factor: %s" % replication_value)
|
||||
@@ -280,7 +296,7 @@ class SaharaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('sahara.launch_cluster')
|
||||
def _launch_cluster(self, plugin_name, hadoop_version, flavor_id,
|
||||
image_id, node_count, floating_ip_pool=None,
|
||||
image_id, workers_count, floating_ip_pool=None,
|
||||
volumes_per_node=None,
|
||||
volumes_size=None, auto_security_group=None,
|
||||
security_groups=None, node_configs=None,
|
||||
@@ -295,8 +311,9 @@ class SaharaScenario(base.Scenario):
|
||||
:param hadoop_version: Hadoop version supported by the plugin
|
||||
:param flavor_id: flavor which will be used to create instances
|
||||
:param image_id: image id that will be used to boot instances
|
||||
:param node_count: total number of instances. 1 master node, others
|
||||
for the workers
|
||||
:param workers_count: number of worker instances. All plugins will
|
||||
also add one Master instance and some plugins
|
||||
add a Manager instance.
|
||||
:param floating_ip_pool: floating ip pool name from which Floating
|
||||
IPs will be allocated
|
||||
:param volumes_per_node: number of Cinder volumes that will be
|
||||
@@ -327,10 +344,22 @@ class SaharaScenario(base.Scenario):
|
||||
"flavor_id": flavor_id,
|
||||
"node_processes": self.NODE_PROCESSES[plugin_name]
|
||||
[hadoop_version]["worker"],
|
||||
"count": node_count - 1
|
||||
"count": workers_count
|
||||
}
|
||||
]
|
||||
|
||||
if "manager" in self.NODE_PROCESSES[plugin_name][hadoop_version]:
|
||||
# Adding manager group separately as it is supported only in
|
||||
# specific configurations.
|
||||
|
||||
node_groups.append({
|
||||
"name": "manager-ng",
|
||||
"flavor_id": flavor_id,
|
||||
"node_processes": self.NODE_PROCESSES[plugin_name]
|
||||
[hadoop_version]["manager"],
|
||||
"count": 1
|
||||
})
|
||||
|
||||
node_groups = self._setup_floating_ip_pool(node_groups,
|
||||
floating_ip_pool)
|
||||
|
||||
@@ -346,7 +375,7 @@ class SaharaScenario(base.Scenario):
|
||||
node_groups = self._setup_node_configs(node_groups, node_configs)
|
||||
|
||||
replication_config = self._setup_replication_config(hadoop_version,
|
||||
node_count,
|
||||
workers_count,
|
||||
plugin_name)
|
||||
|
||||
# The replication factor should be set for small clusters. However the
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"flavor": {
|
||||
"name": "m1.small"
|
||||
},
|
||||
"node_count": 2,
|
||||
"workers_count": 3,
|
||||
"plugin_name": "vanilla",
|
||||
"hadoop_version": "2.3.0",
|
||||
"auto_security_group": true
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
args:
|
||||
flavor:
|
||||
name: "m1.small"
|
||||
node_count: 2
|
||||
workers_count: 3
|
||||
plugin_name: "vanilla"
|
||||
hadoop_version: "2.3.0"
|
||||
auto_security_group: True
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"flavor": {
|
||||
"name": "m1.small"
|
||||
},
|
||||
"node_count": 2,
|
||||
"workers_count": 3,
|
||||
"deltas": [1, -1, 1, -1],
|
||||
"plugin_name": "vanilla",
|
||||
"hadoop_version": "2.3.0",
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
args:
|
||||
flavor:
|
||||
name: "m1.small"
|
||||
node_count: 2
|
||||
workers_count: 3
|
||||
deltas:
|
||||
- 1
|
||||
- -1
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
},
|
||||
"sahara_cluster": {
|
||||
"flavor_id": "2",
|
||||
"node_count": 2,
|
||||
"workers_count": 3,
|
||||
"plugin_name": "vanilla",
|
||||
"hadoop_version": "1.2.1",
|
||||
"auto_security_group": true
|
||||
|
||||
@@ -49,7 +49,7 @@
|
||||
download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-test/1.2.1/hadoop-test-1.2.1.jar"
|
||||
sahara_cluster:
|
||||
flavor_id: "2"
|
||||
node_count: 2
|
||||
workers_count: 3
|
||||
plugin_name: "vanilla"
|
||||
hadoop_version: "1.2.1"
|
||||
auto_security_group: True
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
},
|
||||
"sahara_cluster": {
|
||||
"flavor_id": "2",
|
||||
"node_count": 2,
|
||||
"workers_count": 3,
|
||||
"plugin_name": "vanilla",
|
||||
"hadoop_version": "1.2.1",
|
||||
"auto_security_group": true
|
||||
|
||||
@@ -52,7 +52,7 @@
|
||||
download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-test/1.2.1/hadoop-test-1.2.1.jar"
|
||||
sahara_cluster:
|
||||
flavor_id: "2"
|
||||
node_count: 2
|
||||
workers_count: 3
|
||||
plugin_name: "vanilla"
|
||||
hadoop_version: "1.2.1"
|
||||
auto_security_group: True
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
},
|
||||
"sahara_cluster": {
|
||||
"flavor_id": "2",
|
||||
"node_count": 2,
|
||||
"workers_count": 3,
|
||||
"plugin_name": "vanilla",
|
||||
"hadoop_version": "1.2.1",
|
||||
"auto_security_group": true
|
||||
|
||||
@@ -33,7 +33,7 @@
|
||||
download_url: "http://repo1.maven.org/maven2/org/apache/hadoop/hadoop-examples/1.2.1/hadoop-examples-1.2.1.jar"
|
||||
sahara_cluster:
|
||||
flavor_id: "2"
|
||||
node_count: 2
|
||||
workers_count: 3
|
||||
plugin_name: "vanilla"
|
||||
hadoop_version: "1.2.1"
|
||||
auto_security_group: True
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
},
|
||||
"sahara_cluster": {
|
||||
"flavor_id": "2",
|
||||
"node_count": 2,
|
||||
"workers_count": 3,
|
||||
"plugin_name": "vanilla",
|
||||
"hadoop_version": "1.2.1",
|
||||
"auto_security_group": true
|
||||
|
||||
@@ -32,7 +32,7 @@
|
||||
download_url: "https://github.com/openstack/sahara/blob/master/etc/edp-examples/pig-job/udf.jar?raw=true"
|
||||
sahara_cluster:
|
||||
flavor_id: "2"
|
||||
node_count: 2
|
||||
workers_count: 3
|
||||
plugin_name: "vanilla"
|
||||
hadoop_version: "1.2.1"
|
||||
auto_security_group: True
|
||||
|
||||
@@ -58,7 +58,7 @@ class SaharaClusterTestCase(test.TestCase):
|
||||
},
|
||||
"sahara_cluster": {
|
||||
"flavor_id": "test_flavor",
|
||||
"node_count": 2,
|
||||
"workers_count": 2,
|
||||
"plugin_name": "test_plugin",
|
||||
"hadoop_version": "test_version"
|
||||
}
|
||||
@@ -88,7 +88,7 @@ class SaharaClusterTestCase(test.TestCase):
|
||||
plugin_name="test_plugin",
|
||||
hadoop_version="test_version",
|
||||
flavor_id="test_flavor",
|
||||
node_count=2,
|
||||
workers_count=2,
|
||||
image_id=ctx["tenants"][i]["sahara_image"],
|
||||
floating_ip_pool=None,
|
||||
volumes_per_node=None,
|
||||
@@ -127,7 +127,7 @@ class SaharaClusterTestCase(test.TestCase):
|
||||
plugin_name="test_plugin",
|
||||
hadoop_version="test_version",
|
||||
flavor_id="test_flavor",
|
||||
node_count=2,
|
||||
workers_count=2,
|
||||
image_id=ctx["tenants"][i]["sahara_image"],
|
||||
floating_ip_pool=None,
|
||||
volumes_per_node=None,
|
||||
|
||||
@@ -40,14 +40,14 @@ class SaharaClustersTestCase(test.TestCase):
|
||||
}
|
||||
clusters_scenario.create_and_delete_cluster(
|
||||
flavor="test_flavor",
|
||||
node_count=5,
|
||||
workers_count=5,
|
||||
plugin_name="test_plugin",
|
||||
hadoop_version="test_version")
|
||||
|
||||
mock_launch_cluster.assert_called_once_with(
|
||||
flavor_id="test_flavor",
|
||||
image_id="test_image",
|
||||
node_count=5,
|
||||
workers_count=5,
|
||||
plugin_name="test_plugin",
|
||||
hadoop_version="test_version",
|
||||
floating_ip_pool=None,
|
||||
@@ -85,7 +85,7 @@ class SaharaClustersTestCase(test.TestCase):
|
||||
|
||||
clusters_scenario.create_scale_delete_cluster(
|
||||
flavor="test_flavor",
|
||||
node_count=5,
|
||||
workers_count=5,
|
||||
deltas=[1, -1],
|
||||
plugin_name="test_plugin",
|
||||
hadoop_version="test_version")
|
||||
@@ -93,7 +93,7 @@ class SaharaClustersTestCase(test.TestCase):
|
||||
mock_launch_cluster.assert_called_once_with(
|
||||
flavor_id="test_flavor",
|
||||
image_id="test_image",
|
||||
node_count=5,
|
||||
workers_count=5,
|
||||
plugin_name="test_plugin",
|
||||
hadoop_version="test_version",
|
||||
floating_ip_pool=None,
|
||||
|
||||
@@ -174,7 +174,7 @@ class SaharaUtilsTestCase(test.TestCase):
|
||||
"floating_ip_pool": floating_ip_pool_uuid,
|
||||
"volumes_per_node": 5,
|
||||
"volumes_size": 10,
|
||||
"count": 41,
|
||||
"count": 42,
|
||||
"auto_security_group": True,
|
||||
"security_groups": ["g1", "g2"],
|
||||
"node_configs": {"HDFS": {"local_config": "local_value"}},
|
||||
@@ -200,7 +200,7 @@ class SaharaUtilsTestCase(test.TestCase):
|
||||
volumes_size=10,
|
||||
auto_security_group=True,
|
||||
security_groups=["g1", "g2"],
|
||||
node_count=42,
|
||||
workers_count=42,
|
||||
node_configs={"HDFS": {"local_config": "local_value"}}
|
||||
)
|
||||
|
||||
@@ -259,7 +259,7 @@ class SaharaUtilsTestCase(test.TestCase):
|
||||
floating_ip_pool="test_pool",
|
||||
volumes_per_node=5,
|
||||
volumes_size=10,
|
||||
node_count=42,
|
||||
workers_count=42,
|
||||
node_configs={"HDFS": {"local_config":
|
||||
"local_value"}})
|
||||
|
||||
|
||||
Reference in New Issue
Block a user