Change CDH plugin process show-names

We change the process show-names of the Cloudera plugin so that users
are not confused by names that give no hint of the service they belong
to. For example, ZooKeeper's 'SERVER' process is now shown as
'ZOOKEEPER_SERVER', and HBase's 'MASTER' as 'HBASE_MASTER'.

Closes-bug: #1411181
Change-Id: I666c69d9376e368bdd4c75abfdeadf853e0956d3
commit 8a8be3162a
parent 0527e0a67f
Author: Ken Chen
Date:   2015-01-22 16:09:22 +08:00

6 changed files with 173 additions and 138 deletions

File 1 of 6

@@ -197,10 +197,10 @@ def get_role_name(instance, service):
         'REGIONSERVER': 'RS',
         'RESOURCEMANAGER': 'RM',
         'SECONDARYNAMENODE': 'SNN',
-        'SENTRY_SERVER': 'SS',
+        'SENTRY_SERVER': 'SNT',
         'SERVER': 'S',
         'SERVICEMONITOR': 'SM',
-        'SOLR_SERVER': 'SS',
+        'SOLR_SERVER': 'SLR',
         'SPARK_YARN_HISTORY_SERVER': 'SHS',
         'SQOOP_SERVER': 'S2S',
         'STATESTORE': 'ISS',
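
These abbreviations seed the Cloudera Manager role names. Before this change both SENTRY_SERVER and SOLR_SERVER shortened to 'SS', so their generated role names could collide; the new 'SNT'/'SLR' codes keep them distinct. A minimal sketch of how the map is typically consumed (the hostname handling here is an assumption, not the verbatim function body):

# Sketch only: how get_role_name plausibly combines the short service
# code with a per-host suffix to make the CM role name unique.
def get_role_name(instance, service):
    shortcuts = {
        'SENTRY_SERVER': 'SNT',  # was 'SS' -- collided with SOLR_SERVER
        'SOLR_SERVER': 'SLR',    # was 'SS'
        # ... remaining abbreviations as in the hunk above
    }
    # Assumed: hyphens are sanitized because CM restricts the characters
    # allowed in role names.
    return '%s_%s' % (shortcuts.get(service, service),
                      instance.hostname().replace('-', '_'))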

File 2 of 6

@@ -114,11 +114,11 @@ def _get_configs(service, cluster=None, node_group=None):
     all_confs = {}
     if cluster:
-        zk_count = v._get_inst_count(cluster, 'SERVER')
-        hbm_count = v._get_inst_count(cluster, 'MASTER')
-        ss_count = v._get_inst_count(cluster, 'SENTRY_SERVER')
-        ks_count = v._get_inst_count(cluster, 'HBASE_INDEXER')
-        imp_count = v._get_inst_count(cluster, 'CATALOGSERVER')
+        zk_count = v._get_inst_count(cluster, 'ZOOKEEPER_SERVER')
+        hbm_count = v._get_inst_count(cluster, 'HBASE_MASTER')
+        snt_count = v._get_inst_count(cluster, 'SENTRY_SERVER')
+        ks_count = v._get_inst_count(cluster, 'KEY_VALUE_STORE_INDEXER')
+        imp_count = v._get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
         core_site_safety_valve = ''
         if c_helper.is_swift_enabled(cluster):
             configs = swift_helper.get_swift_configs()
@@ -150,7 +150,7 @@ def _get_configs(service, cluster=None, node_group=None):
         'HUE': {
             'hive_service': cu.HIVE_SERVICE_NAME,
             'oozie_service': cu.OOZIE_SERVICE_NAME,
-            'sentry_service': cu.SENTRY_SERVICE_NAME if ss_count else '',
+            'sentry_service': cu.SENTRY_SERVICE_NAME if snt_count else '',
             'zookeeper_service':
                 cu.ZOOKEEPER_SERVICE_NAME if zk_count else ''
         },
@@ -281,16 +281,16 @@ def scale_cluster(cluster, instances):
         _configure_instance(instance)
         cu.update_configs(instance)
-        if 'DATANODE' in instance.node_group.node_processes:
+        if 'HDFS_DATANODE' in instance.node_group.node_processes:
             cu.refresh_nodes(cluster, 'DATANODE', cu.HDFS_SERVICE_NAME)
         _configure_swift_to_inst(instance)
-        if 'DATANODE' in instance.node_group.node_processes:
+        if 'HDFS_DATANODE' in instance.node_group.node_processes:
             hdfs = cu.get_service('DATANODE', instance=instance)
             cu.start_roles(hdfs, cu.get_role_name(instance, 'DATANODE'))
-        if 'NODEMANAGER' in instance.node_group.node_processes:
+        if 'YARN_NODEMANAGER' in instance.node_group.node_processes:
             yarn = cu.get_service('NODEMANAGER', instance=instance)
             cu.start_roles(yarn, cu.get_role_name(instance, 'NODEMANAGER'))
@@ -299,9 +299,9 @@ def decommission_cluster(cluster, instances):
     dns = []
     nms = []
     for i in instances:
-        if 'DATANODE' in i.node_group.node_processes:
+        if 'HDFS_DATANODE' in i.node_group.node_processes:
             dns.append(cu.get_role_name(i, 'DATANODE'))
-        if 'NODEMANAGER' in i.node_group.node_processes:
+        if 'YARN_NODEMANAGER' in i.node_group.node_processes:
            nms.append(cu.get_role_name(i, 'NODEMANAGER'))

     if dns:
@@ -543,9 +543,10 @@ def _configure_instance(instance):
 def _add_role(instance, process):
-    if process in ['MANAGER']:
+    if process in ['CLOUDERA_MANAGER']:
         return

+    process = pu.convert_role_showname(process)
     service = cu.get_service(process, instance=instance)
     role = service.create_role(cu.get_role_name(instance, process),
                                process, instance.fqdn())
@@ -640,29 +641,29 @@ def get_open_ports(node_group):
     ports = [9000] # for CM agent
     ports_map = {
-        'MANAGER': [7180, 7182, 7183, 7432, 7184, 8084, 8086, 10101,
-                    9997, 9996, 8087, 9998, 9999, 8085, 9995, 9994],
-        'NAMENODE': [8020, 8022, 50070, 50470],
-        'SECONDARYNAMENODE': [50090, 50495],
-        'DATANODE': [50010, 1004, 50075, 1006, 50020],
-        'RESOURCEMANAGER': [8030, 8031, 8032, 8033, 8088],
-        'NODEMANAGER': [8040, 8041, 8042],
-        'JOBHISTORY': [10020, 19888],
-        'HIVEMETASTORE': [9083],
-        'HIVESERVER2': [10000],
+        'CLOUDERA_MANAGER': [7180, 7182, 7183, 7432, 7184, 8084, 8086, 10101,
+                             9997, 9996, 8087, 9998, 9999, 8085, 9995, 9994],
+        'HDFS_NAMENODE': [8020, 8022, 50070, 50470],
+        'HDFS_SECONDARYNAMENODE': [50090, 50495],
+        'HDFS_DATANODE': [50010, 1004, 50075, 1006, 50020],
+        'YARN_RESOURCEMANAGER': [8030, 8031, 8032, 8033, 8088],
+        'YARN_NODEMANAGER': [8040, 8041, 8042],
+        'YARN_JOBHISTORY': [10020, 19888],
+        'HIVE_METASTORE': [9083],
+        'HIVE_SERVER2': [10000],
         'HUE_SERVER': [8888],
         'OOZIE_SERVER': [11000, 11001],
         'SPARK_YARN_HISTORY_SERVER': [18088],
-        'SERVER': [2181, 3181, 4181, 9010],
-        'MASTER': [60000],
-        'REGIONSERVER': [60020],
-        'AGENT': [41414],
+        'ZOOKEEPER_SERVER': [2181, 3181, 4181, 9010],
+        'HBASE_MASTER': [60000],
+        'HBASE_REGIONSERVER': [60020],
+        'FLUME_AGENT': [41414],
         'SENTRY_SERVER': [8038],
         'SOLR_SERVER': [8983, 8984],
         'SQOOP_SERVER': [8005, 12000],
-        'HBASE_INDEXER': [],
-        'CATALOGSERVER': [25020, 26000],
-        'STATESTORE': [25010, 24000],
+        'KEY_VALUE_STORE_INDEXER': [],
+        'IMPALA_CATALOGSERVER': [25020, 26000],
+        'IMPALA_STATESTORE': [25010, 24000],
         'IMPALAD': [21050, 21000, 23000, 25000, 28000, 22000]
     }
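
Since node_processes now carries the long show-names, every key of ports_map had to move in lockstep; a stale key would silently open no ports for that process. A sketch of the consuming loop, assuming the usual contract that get_open_ports takes one node group and returns a flat port list:

# Sketch of the lookup loop; ports_map abbreviated to two entries here.
ports_map = {
    'HDFS_NAMENODE': [8020, 8022, 50070, 50470],
    'YARN_NODEMANAGER': [8040, 8041, 8042],
    # ... remaining entries as in the hunk above
}

def get_open_ports(node_group):
    ports = [9000]  # CM agent port, opened on every instance
    for process in node_group.node_processes:
        # Unknown keys contribute nothing -- which is why the rename had
        # to touch this map as well as the process lists.
        ports.extend(ports_map.get(process, []))
    return ports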

File 3 of 6

@@ -41,32 +41,32 @@ class CDHPluginProvider(p.ProvisioningPluginBase):
     def get_node_processes(self, hadoop_version):
         processes = {
-            "CLOUDERA": ['MANAGER'],
+            "CLOUDERA": ['CLOUDERA_MANAGER'],
             "HDFS": [],
-            "NAMENODE": ['NAMENODE'],
-            "DATANODE": ['DATANODE'],
-            "SECONDARYNAMENODE": ['SECONDARYNAMENODE'],
+            "NAMENODE": ['HDFS_NAMENODE'],
+            "DATANODE": ['HDFS_DATANODE'],
+            "SECONDARYNAMENODE": ['HDFS_SECONDARYNAMENODE'],
             "YARN": [],
-            "RESOURCEMANAGER": ['RESOURCEMANAGER'],
-            "NODEMANAGER": ['NODEMANAGER'],
-            "JOBHISTORY": ['JOBHISTORY'],
+            "RESOURCEMANAGER": ['YARN_RESOURCEMANAGER'],
+            "NODEMANAGER": ['YARN_NODEMANAGER'],
+            "JOBHISTORY": ['YARN_JOBHISTORY'],
             "OOZIE": ['OOZIE_SERVER'],
             "HIVE": [],
-            "HIVESERVER": ['HIVESERVER2'],
-            "HIVEMETASTORE": ['HIVEMETASTORE'],
-            "WEBHCAT": ['WEBHCAT'],
+            "HIVESERVER": ['HIVE_SERVER2'],
+            "HIVEMETASTORE": ['HIVE_METASTORE'],
+            "WEBHCAT": ['HIVE_WEBHCAT'],
             "HUE": ['HUE_SERVER'],
             "SPARK_ON_YARN": ['SPARK_YARN_HISTORY_SERVER'],
-            "ZOOKEEPER": ['SERVER'],
+            "ZOOKEEPER": ['ZOOKEEPER_SERVER'],
             "HBASE": [],
-            "MASTER": ['MASTER'],
-            "REGIONSERVER": ['REGIONSERVER'],
-            "FLUME": ['AGENT'],
+            "MASTER": ['HBASE_MASTER'],
+            "REGIONSERVER": ['HBASE_REGIONSERVER'],
+            "FLUME": ['FLUME_AGENT'],
             "IMPALA": [],
-            "CATALOGSERVER": ['CATALOGSERVER'],
-            "STATESTORE": ['STATESTORE'],
+            "CATALOGSERVER": ['IMPALA_CATALOGSERVER'],
+            "STATESTORE": ['IMPALA_STATESTORE'],
             "IMPALAD": ['IMPALAD'],
-            "KS_INDEXER": ['HBASE_INDEXER'],
+            "KS_INDEXER": ['KEY_VALUE_STORE_INDEXER'],
             "SOLR": ['SOLR_SERVER'],
             "SQOOP": ['SQOOP_SERVER']
         }
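
The dict keys are the plugin's service groups; the values are the process show-names users now see and write in their templates. A hypothetical node-group spec under the new names (field names are illustrative, following Sahara's usual node-group shape, not copied from this change):

# Hypothetical worker node group using the renamed processes.
worker_ng = {
    'name': 'worker',
    'count': 3,
    'node_processes': [
        'HDFS_DATANODE',     # previously just 'DATANODE'
        'YARN_NODEMANAGER',  # previously just 'NODEMANAGER'
    ],
}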

File 4 of 6

@@ -18,43 +18,43 @@ from sahara.plugins import utils as u
 def get_manager(cluster):
-    return u.get_instance(cluster, 'MANAGER')
+    return u.get_instance(cluster, 'CLOUDERA_MANAGER')

 def get_namenode(cluster):
-    return u.get_instance(cluster, "NAMENODE")
+    return u.get_instance(cluster, 'HDFS_NAMENODE')
+
+def get_secondarynamenode(cluster):
+    return u.get_instance(cluster, 'HDFS_SECONDARYNAMENODE')
+
+def get_datanodes(cluster):
+    return u.get_instances(cluster, 'HDFS_DATANODE')

 def get_resourcemanager(cluster):
-    return u.get_instance(cluster, 'RESOURCEMANAGER')
+    return u.get_instance(cluster, 'YARN_RESOURCEMANAGER')

 def get_nodemanagers(cluster):
-    return u.get_instances(cluster, 'NODEMANAGER')
+    return u.get_instances(cluster, 'YARN_NODEMANAGER')
+
+def get_historyserver(cluster):
+    return u.get_instance(cluster, 'YARN_JOBHISTORY')

 def get_oozie(cluster):
     return u.get_instance(cluster, 'OOZIE_SERVER')

-def get_datanodes(cluster):
-    return u.get_instances(cluster, 'DATANODE')
-
-def get_secondarynamenode(cluster):
-    return u.get_instance(cluster, 'SECONDARYNAMENODE')
-
-def get_historyserver(cluster):
-    return u.get_instance(cluster, 'JOBHISTORY')

 def get_hive_metastore(cluster):
-    return u.get_instance(cluster, 'HIVEMETASTORE')
+    return u.get_instance(cluster, 'HIVE_METASTORE')

 def get_hive_servers(cluster):
-    return u.get_instances(cluster, 'HIVESERVER2')
+    return u.get_instances(cluster, 'HIVE_SERVER2')

 def get_hue(cluster):
@@ -66,15 +66,15 @@ def get_spark_historyserver(cluster):
 def get_zookeepers(cluster):
-    return u.get_instances(cluster, 'SERVER')
+    return u.get_instances(cluster, 'ZOOKEEPER_SERVER')

 def get_hbase_master(cluster):
-    return u.get_instance(cluster, 'MASTER')
+    return u.get_instance(cluster, 'HBASE_MASTER')

 def get_flumes(cluster):
-    return u.get_instances(cluster, 'AGENT')
+    return u.get_instances(cluster, 'FLUME_AGENT')

 def get_sentry(cluster):
@@ -90,15 +90,15 @@ def get_sqoop(cluster):
 def get_hbase_indexers(cluster):
-    return u.get_instances(cluster, 'HBASE_INDEXER')
+    return u.get_instances(cluster, 'KEY_VALUE_STORE_INDEXER')

 def get_catalogserver(cluster):
-    return u.get_instance(cluster, 'CATALOGSERVER')
+    return u.get_instance(cluster, 'IMPALA_CATALOGSERVER')

 def get_statestore(cluster):
-    return u.get_instance(cluster, 'STATESTORE')
+    return u.get_instance(cluster, 'IMPALA_STATESTORE')

 def get_impalads(cluster):
@@ -141,3 +141,33 @@ def convert_process_configs(configs):
             newkey = p_dict[k][0]
             configs[newkey] = item
     return res.Resource(configs)
+
+
+def convert_role_showname(showname):
+    name_dict = {
+        'CLOUDERA_MANAGER': 'MANAGER',
+        'HDFS_NAMENODE': 'NAMENODE',
+        'HDFS_DATANODE': 'DATANODE',
+        'HDFS_SECONDARYNAMENODE': 'SECONDARYNAMENODE',
+        'YARN_RESOURCEMANAGER': 'RESOURCEMANAGER',
+        'YARN_NODEMANAGER': 'NODEMANAGER',
+        'YARN_JOBHISTORY': 'JOBHISTORY',
+        'OOZIE_SERVER': 'OOZIE_SERVER',
+        'HIVE_SERVER2': 'HIVESERVER2',
+        'HIVE_METASTORE': 'HIVEMETASTORE',
+        'HIVE_WEBHCAT': 'WEBHCAT',
+        'HUE_SERVER': 'HUE_SERVER',
+        'SPARK_YARN_HISTORY_SERVER': 'SPARK_YARN_HISTORY_SERVER',
+        'ZOOKEEPER_SERVER': 'SERVER',
+        'HBASE_MASTER': 'MASTER',
+        'HBASE_REGIONSERVER': 'REGIONSERVER',
+        'FLUME_AGENT': 'AGENT',
+        'IMPALA_CATALOGSERVER': 'CATALOGSERVER',
+        'IMPALA_STATESTORE': 'STATESTORE',
+        'IMPALAD': 'IMPALAD',
+        'KEY_VALUE_STORE_INDEXER': 'HBASE_INDEXER',
+        'SENTRY_SERVER': 'SENTRY_SERVER',
+        'SOLR_SERVER': 'SOLR_SERVER',
+        'SQOOP_SERVER': 'SQOOP_SERVER',
+    }
+    return name_dict.get(showname, None)
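
This reverse map lets the rest of the plugin keep talking to the CM API in the old internal names while users only ever see the prefixed ones (see the _add_role hunk above). Note the None fallback: an unmapped input falls through rather than passing unchanged, so callers are expected to supply validated show-names. Expected behavior, read straight off the mapping:

convert_role_showname('HDFS_DATANODE')     # -> 'DATANODE'
convert_role_showname('ZOOKEEPER_SERVER')  # -> 'SERVER'
convert_role_showname('OOZIE_SERVER')      # -> 'OOZIE_SERVER' (identity entry)
convert_role_showname('NOT_A_PROCESS')     # -> None, not the input itself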

File 5 of 6

@@ -31,41 +31,42 @@ def validate_cluster_creating(cluster):
                             "'cm_api' package version 6.0.2 or later."))
         raise ex.HadoopProvisionError(_("'cm_api' is not installed."))

-    mng_count = _get_inst_count(cluster, 'MANAGER')
+    mng_count = _get_inst_count(cluster, 'CLOUDERA_MANAGER')
     if mng_count != 1:
-        raise ex.InvalidComponentCountException('MANAGER', 1, mng_count)
+        raise ex.InvalidComponentCountException('CLOUDERA_MANAGER',
+                                                1, mng_count)

-    nn_count = _get_inst_count(cluster, 'NAMENODE')
+    nn_count = _get_inst_count(cluster, 'HDFS_NAMENODE')
     if nn_count != 1:
-        raise ex.InvalidComponentCountException('NAMENODE', 1, nn_count)
+        raise ex.InvalidComponentCountException('HDFS_NAMENODE', 1, nn_count)

-    snn_count = _get_inst_count(cluster, 'SECONDARYNAMENODE')
+    snn_count = _get_inst_count(cluster, 'HDFS_SECONDARYNAMENODE')
     if snn_count != 1:
-        raise ex.InvalidComponentCountException('SECONDARYNAMENODE', 1,
+        raise ex.InvalidComponentCountException('HDFS_SECONDARYNAMENODE', 1,
                                                 snn_count)

-    rm_count = _get_inst_count(cluster, 'RESOURCEMANAGER')
+    rm_count = _get_inst_count(cluster, 'YARN_RESOURCEMANAGER')
     if rm_count not in [0, 1]:
-        raise ex.InvalidComponentCountException('RESOURCEMANAGER', _('0 or 1'),
-                                                rm_count)
+        raise ex.InvalidComponentCountException('YARN_RESOURCEMANAGER',
+                                                _('0 or 1'), rm_count)

-    hs_count = _get_inst_count(cluster, 'JOBHISTORY')
+    hs_count = _get_inst_count(cluster, 'YARN_JOBHISTORY')
     if hs_count not in [0, 1]:
-        raise ex.InvalidComponentCountException('JOBHISTORY', _('0 or 1'),
-                                                hs_count)
+        raise ex.InvalidComponentCountException('YARN_JOBHISTORY',
+                                                _('0 or 1'), hs_count)

     if rm_count > 0 and hs_count < 1:
-        raise ex.RequiredServiceMissingException('JOBHISTORY',
-                                                 required_by='RESOURCEMANAGER')
+        raise ex.RequiredServiceMissingException(
+            'YARN_JOBHISTORY', required_by='YARN_RESOURCEMANAGER')

-    nm_count = _get_inst_count(cluster, 'NODEMANAGER')
+    nm_count = _get_inst_count(cluster, 'YARN_NODEMANAGER')
     if rm_count == 0:
         if nm_count > 0:
-            raise ex.RequiredServiceMissingException('RESOURCEMANAGER',
-                                                     required_by='NODEMANAGER')
+            raise ex.RequiredServiceMissingException(
+                'YARN_RESOURCEMANAGER', required_by='YARN_NODEMANAGER')

     oo_count = _get_inst_count(cluster, 'OOZIE_SERVER')
-    dn_count = _get_inst_count(cluster, 'DATANODE')
+    dn_count = _get_inst_count(cluster, 'HDFS_DATANODE')
     if oo_count not in [0, 1]:
         raise ex.InvalidComponentCountException('OOZIE_SERVER', _('0 or 1'),
                                                 oo_count)
@@ -73,35 +74,35 @@ def validate_cluster_creating(cluster):
     if oo_count == 1:
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='OOZIE_SERVER')
+                'HDFS_DATANODE', required_by='OOZIE_SERVER')
         if nm_count < 1:
             raise ex.RequiredServiceMissingException(
-                'NODEMANAGER', required_by='OOZIE_SERVER')
+                'YARN_NODEMANAGER', required_by='OOZIE_SERVER')
         if hs_count != 1:
             raise ex.RequiredServiceMissingException(
-                'JOBHISTORY', required_by='OOZIE_SERVER')
+                'YARN_JOBHISTORY', required_by='OOZIE_SERVER')

-    hms_count = _get_inst_count(cluster, 'HIVEMETASTORE')
-    hvs_count = _get_inst_count(cluster, 'HIVESERVER2')
-    whc_count = _get_inst_count(cluster, 'WEBHCAT')
+    hms_count = _get_inst_count(cluster, 'HIVE_METASTORE')
+    hvs_count = _get_inst_count(cluster, 'HIVE_SERVER2')
+    whc_count = _get_inst_count(cluster, 'HIVE_WEBHCAT')

     if hms_count and rm_count < 1:
         raise ex.RequiredServiceMissingException(
-            'RESOURCEMANAGER', required_by='HIVEMETASTORE')
+            'YARN_RESOURCEMANAGER', required_by='HIVE_METASTORE')

     if hms_count and not hvs_count:
         raise ex.RequiredServiceMissingException(
-            'HIVESERVER2', required_by='HIVEMETASTORE')
+            'HIVE_SERVER2', required_by='HIVE_METASTORE')

     if hvs_count and not hms_count:
         raise ex.RequiredServiceMissingException(
-            'HIVEMETASTORE', required_by='HIVESERVER2')
+            'HIVE_METASTORE', required_by='HIVE_SERVER2')

     if whc_count and not hms_count:
         raise ex.RequiredServiceMissingException(
-            'HIVEMETASTORE', required_by='WEBHCAT')
+            'HIVE_METASTORE', required_by='HIVE_WEBHCAT')

     hue_count = _get_inst_count(cluster, 'HUE_SERVER')
     if hue_count not in [0, 1]:
@@ -114,7 +115,7 @@ def validate_cluster_creating(cluster):
                                                 '0 or 1', shs_count)
     if shs_count and not rm_count:
         raise ex.RequiredServiceMissingException(
-            'RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')
+            'YARN_RESOURCEMANAGER', required_by='SPARK_YARN_HISTORY_SERVER')

     if oo_count < 1 and hue_count:
         raise ex.RequiredServiceMissingException(
@@ -122,46 +123,46 @@ def validate_cluster_creating(cluster):
     if hms_count < 1 and hue_count:
         raise ex.RequiredServiceMissingException(
-            'HIVEMETASTORE', required_by='HUE_SERVER')
+            'HIVE_METASTORE', required_by='HUE_SERVER')

-    hbm_count = _get_inst_count(cluster, 'MASTER')
-    hbr_count = _get_inst_count(cluster, 'REGIONSERVER')
-    zk_count = _get_inst_count(cluster, 'SERVER')
+    hbm_count = _get_inst_count(cluster, 'HBASE_MASTER')
+    hbr_count = _get_inst_count(cluster, 'HBASE_REGIONSERVER')
+    zk_count = _get_inst_count(cluster, 'ZOOKEEPER_SERVER')

     if hbm_count >= 1:
         if zk_count < 1:
             raise ex.RequiredServiceMissingException('ZOOKEEPER',
                                                      required_by='HBASE')
         if hbr_count < 1:
-            raise ex.InvalidComponentCountException('REGIONSERVER',
-                                                    _('at least 1'), hbr_count)
+            raise ex.InvalidComponentCountException(
+                'HBASE_REGIONSERVER', _('at least 1'), hbr_count)
     elif hbr_count >= 1:
-        raise ex.InvalidComponentCountException('MASTER',
+        raise ex.InvalidComponentCountException('HBASE_MASTER',
                                                 _('at least 1'), hbm_count)

-    a_count = _get_inst_count(cluster, 'AGENT')
+    a_count = _get_inst_count(cluster, 'FLUME_AGENT')
     if a_count >= 1:
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='FLUME_AGENT')
+                'HDFS_DATANODE', required_by='FLUME_AGENT')

-    ss1_count = _get_inst_count(cluster, 'SENTRY_SERVER')
-    if ss1_count not in [0, 1]:
+    snt_count = _get_inst_count(cluster, 'SENTRY_SERVER')
+    if snt_count not in [0, 1]:
         raise ex.InvalidComponentCountException('SENTRY_SERVER', _('0 or 1'),
-                                                ss1_count)
-    if ss1_count == 1:
+                                                snt_count)
+    if snt_count == 1:
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='SENTRY_SERVER')
+                'HDFS_DATANODE', required_by='SENTRY_SERVER')
         if zk_count < 1:
             raise ex.RequiredServiceMissingException(
                 'ZOOKEEPER', required_by='SENTRY_SERVER')

-    ss2_count = _get_inst_count(cluster, 'SOLR_SERVER')
-    if ss2_count >= 1:
+    slr_count = _get_inst_count(cluster, 'SOLR_SERVER')
+    if slr_count >= 1:
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='SOLR_SERVER')
+                'HDFS_DATANODE', required_by='SOLR_SERVER')
         if zk_count < 1:
             raise ex.RequiredServiceMissingException(
                 'ZOOKEEPER', required_by='SOLR_SERVER')
@@ -173,51 +174,51 @@ def validate_cluster_creating(cluster):
     if s2s_count == 1:
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='SQOOP_SERVER')
+                'HDFS_DATANODE', required_by='SQOOP_SERVER')
         if nm_count < 1:
             raise ex.RequiredServiceMissingException(
-                'NODEMANAGER', required_by='SQOOP_SERVER')
+                'YARN_NODEMANAGER', required_by='SQOOP_SERVER')
         if hs_count != 1:
             raise ex.RequiredServiceMissingException(
-                'JOBHISTORY', required_by='SQOOP_SERVER')
+                'YARN_JOBHISTORY', required_by='SQOOP_SERVER')

     lhbi_count = _get_inst_count(cluster, 'HBASE_INDEXER')
     if lhbi_count >= 1:
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='HBASE_INDEXER')
+                'HDFS_DATANODE', required_by='HBASE_INDEXER')
         if zk_count < 1:
             raise ex.RequiredServiceMissingException(
                 'ZOOKEEPER', required_by='HBASE_INDEXER')
-        if ss2_count < 1:
+        if slr_count < 1:
             raise ex.RequiredServiceMissingException(
                 'SOLR_SERVER', required_by='HBASE_INDEXER')
         if hbm_count < 1:
             raise ex.RequiredServiceMissingException(
                 'HBASE_MASTER', required_by='HBASE_INDEXER')

-    ics_count = _get_inst_count(cluster, 'CATALOGSERVER')
-    iss_count = _get_inst_count(cluster, 'STATESTORE')
+    ics_count = _get_inst_count(cluster, 'IMPALA_CATALOGSERVER')
+    iss_count = _get_inst_count(cluster, 'IMPALA_STATESTORE')
     id_count = _get_inst_count(cluster, 'IMPALAD')
     if ics_count not in [0, 1]:
-        raise ex.InvalidComponentCountException('CATALOGSERVER', _('0 or 1'),
-                                                ics_count)
+        raise ex.InvalidComponentCountException('IMPALA_CATALOGSERVER',
+                                                _('0 or 1'), ics_count)
     if iss_count not in [0, 1]:
-        raise ex.InvalidComponentCountException('STATESTORE', _('0 or 1'),
-                                                iss_count)
+        raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
+                                                _('0 or 1'), iss_count)
     if ics_count == 1:
         if iss_count != 1:
             raise ex.RequiredServiceMissingException(
-                'STATESTORE', required_by='IMPALA')
+                'IMPALA_STATESTORE', required_by='IMPALA')
         if id_count < 1:
             raise ex.RequiredServiceMissingException(
                 'IMPALAD', required_by='IMPALA')
         if dn_count < 1:
             raise ex.RequiredServiceMissingException(
-                'DATANODE', required_by='IMPALA')
+                'HDFS_DATANODE', required_by='IMPALA')
         if hms_count < 1:
             raise ex.RequiredServiceMissingException(
-                'HIVEMETASTORE', required_by='IMPALA')
+                'HIVE_METASTORE', required_by='IMPALA')


 def validate_additional_ng_scaling(cluster, additional):
@@ -232,7 +233,7 @@ def validate_additional_ng_scaling(cluster, additional):
             raise ex.NodeGroupCannotBeScaled(
                 ng.name, msg % {'processes': ' '.join(ng.node_processes)})

-        if not rm and 'NODEMANAGER' in ng.node_processes:
+        if not rm and 'YARN_NODEMANAGER' in ng.node_processes:
             msg = _("CDH plugin cannot scale node group with processes "
                     "which have no master-processes run in cluster")
             raise ex.NodeGroupCannotBeScaled(ng.name, msg)
@@ -254,7 +255,7 @@ def validate_existing_ng_scaling(cluster, existing):
 def _get_scalable_processes():
-    return ['DATANODE', 'NODEMANAGER']
+    return ['HDFS_DATANODE', 'YARN_NODEMANAGER']


 def _get_inst_count(cluster, process):
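
The hunk ends before the body of _get_inst_count. For orientation only: such a counter typically sums the sizes of the node groups that run the given process. A sketch, assuming sahara.plugins.utils exposes get_node_groups(cluster, process); this is not the verbatim body:

from sahara.plugins import utils as u


def _get_inst_count(cluster, process):
    # Sketch: count instances across all node groups whose node_processes
    # include the given process name.
    return sum(ng.count for ng in u.get_node_groups(cluster, process))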

File 6 of 6

@@ -18,15 +18,18 @@ from sahara.tests.unit import testutils as tu
 def get_fake_cluster(**kwargs):
     mng = tu.make_inst_dict('id1', 'manager_inst', management_ip='1.2.3.4')
-    mng_ng = tu.make_ng_dict('manager_ng', 1, ['MANAGER'], 1, [mng])
+    mng_ng = tu.make_ng_dict('manager_ng', 1, ['CLOUDERA_MANAGER'], 1, [mng])

     mst = tu.make_inst_dict('id2', 'master_inst', management_ip='1.2.3.5')
-    mst_ng = tu.make_ng_dict('master_ng', 1, ['NAMENODE', 'SECONDARYNAMENODE',
-                                              'RESOURCEMANAGER', 'JOBHISTORY',
+    mst_ng = tu.make_ng_dict('master_ng', 1, ['HDFS_NAMENODE',
+                                              'HDFS_SECONDARYNAMENODE',
+                                              'YARN_RESOURCEMANAGER',
+                                              'YARN_JOBHISTORY',
                                               'OOZIE_SERVER'], 1, [mst])

     wkrs = _get_workers()
-    wkrs_ng = tu.make_ng_dict('worker_ng', 1, ['DATANODE', 'NODEMANAGER'],
+    wkrs_ng = tu.make_ng_dict('worker_ng', 1, ['HDFS_DATANODE',
+                                               'YARN_NODEMANAGER'],
                              len(wkrs), wkrs)
     return tu.create_cluster('test_cluster', 1, 'cdh', '5',
                              [mng_ng, mst_ng, wkrs_ng],