Add validation rules about IMPALAD
The Cloudera documentation states that Impala daemons (IMPALAD) should be installed on the same instances as the DATANODE service. Such a rule should definitely be added to Sahara.

Change-Id: I51499c1a9fff710e42c66da014289233a33e0726
Closes-bug: 1463959
parent c6cb9c24b2
commit c17f4dfa51
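For illustration, here is the kind of node-group layout the rule from the commit message is meant to allow or reject. The group names and process lists below are invented for this sketch and are not part of the change; they only restate the co-location requirement.

# Hypothetical node-group layouts (illustration only).
# Rule: every instance that runs HDFS_DATANODE must also run IMPALAD.

valid_layout = {
    "master": ["HDFS_NAMENODE", "IMPALA_STATESTORE", "IMPALA_CATALOGSERVER"],
    "worker": ["HDFS_DATANODE", "IMPALAD"],        # co-located on every worker
}

invalid_layout = {
    "master": ["HDFS_NAMENODE", "IMPALA_STATESTORE", "IMPALA_CATALOGSERVER"],
    "worker": ["HDFS_DATANODE"],                   # datanode without an impalad
    "impala-only": ["IMPALAD"],                    # impalad without a datanode
}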
@@ -75,7 +75,7 @@ cloudera plugin versions:
   and at least one hbase regionserver.
 + Cluster can't contain hbase regionserver without at least one hbase master.
 
-In case of 5.3.0 version of Cloudera Plugin there are few extra limitations
+In case of 5.3.0 or 5.4.0 version of Cloudera Plugin there are few extra limitations
 in the cluster topology:
 
 + Cluster can't contain flume without at least one datanode.
@@ -93,3 +93,4 @@ in the cluster topology:
 + Cluster can contain at most one impala statestore.
 + Cluster can't contain impala catalogserver without impala statestore,
   at least one impalad service, at least one datanode, and metastore.
++ If using Impala, the daemons must be installed on every datanode.
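The constraints listed in the documentation hunks above are count and dependency rules. A rough sketch of how such rules can be expressed against per-process instance counts follows; the helper name, the counts mapping, and process-name spellings such as HIVE_METASTORE are illustrative assumptions, not the plugin's actual code.

# Illustrative check of the documented Impala constraints against a
# hypothetical mapping of process name -> number of instances.

def validate_impala_counts(counts):
    iss = counts.get("IMPALA_STATESTORE", 0)
    ics = counts.get("IMPALA_CATALOGSERVER", 0)
    if iss > 1:
        raise ValueError("at most one impala statestore is allowed")
    if ics == 1 and (iss != 1 or counts.get("IMPALAD", 0) < 1
                     or counts.get("HDFS_DATANODE", 0) < 1
                     or counts.get("HIVE_METASTORE", 0) < 1):
        raise ValueError("impala catalogserver requires a statestore, "
                         "at least one impalad, at least one datanode, "
                         "and a metastore")


validate_impala_counts({"IMPALA_STATESTORE": 1, "IMPALA_CATALOGSERVER": 1,
                        "IMPALAD": 3, "HDFS_DATANODE": 3,
                        "HIVE_METASTORE": 1})  # passes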
@@ -205,6 +205,12 @@ def validate_cluster_creating(cluster):
         raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                 _('0 or 1'), iss_count)
     if ics_count == 1:
+        datanodes = set(u.get_instances(cluster, "HDFS_DATANODE"))
+        impalads = set(u.get_instances(cluster, "IMPALAD"))
+        if len(datanodes ^ impalads) > 0:
+            raise ex.InvalidClusterTopology(
+                _("IMPALAD must be installed on every HDFS_DATANODE"))
+
         if iss_count != 1:
             raise ex.RequiredServiceMissingException(
                 'IMPALA_STATESTORE', required_by='IMPALA')
@@ -226,6 +226,12 @@ def validate_cluster_creating(cluster):
         raise ex.InvalidComponentCountException('IMPALA_STATESTORE',
                                                 _('0 or 1'), iss_count)
     if ics_count == 1:
+        datanodes = set(u.get_instances(cluster, "HDFS_DATANODE"))
+        impalads = set(u.get_instances(cluster, "IMPALAD"))
+        if len(datanodes ^ impalads) > 0:
+            raise ex.InvalidClusterTopology(
+                _("IMPALAD must be installed on every HDFS_DATANODE"))
+
         if iss_count != 1:
             raise ex.RequiredServiceMissingException(
                 'IMPALA_STATESTORE', required_by='IMPALA')
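The two hunks above add the same six lines in two places (presumably the validation modules for the two plugin versions mentioned in the documentation change). As a standalone illustration of the logic, the sketch below reimplements the symmetric-difference check outside of Sahara; the host names and the helper function are hypothetical, and in the real code u.get_instances(cluster, process) returns instance objects rather than strings.

# Standalone sketch of the co-location check above (not the Sahara code).

def check_impalad_colocation(datanode_hosts, impalad_hosts):
    datanodes = set(datanode_hosts)
    impalads = set(impalad_hosts)
    # The symmetric difference is non-empty whenever some host runs only
    # one of the two processes, in either direction.
    mismatched = datanodes ^ impalads
    if mismatched:
        raise ValueError("IMPALAD must be installed on every HDFS_DATANODE; "
                         "offending hosts: %s" % sorted(mismatched))


check_impalad_colocation(["node-1", "node-2"], ["node-1", "node-2"])  # passes
try:
    check_impalad_colocation(["node-1", "node-2"], ["node-1"])
except ValueError as err:
    print(err)  # IMPALAD must be installed on every HDFS_DATANODE; ...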
@@ -85,6 +85,20 @@ class InvalidComponentCountException(e.SaharaException):
         super(InvalidComponentCountException, self).__init__()
 
 
+class InvalidClusterTopology(e.SaharaException):
+    """Exception indicating other problems in a cluster topology,
+
+    which are different from InvalidComponentCountException and
+    RequiredServiceMissingException.
+    """
+    code = "INVALID_TOPOLOGY"
+    message = _("Cluster has invalid topology: {description}")
+
+    def __init__(self, description):
+        self.message = self.message.format(description=description)
+        super(InvalidClusterTopology, self).__init__()
+
+
 class HadoopProvisionError(e.SaharaException):
     """Exception indicating that cluster provisioning failed.
 
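As a rough sketch of how the new exception behaves, the snippet below substitutes simplified stand-ins for e.SaharaException and the _() translation marker (both defined elsewhere in Sahara and not shown in this diff), then formats a message the way InvalidClusterTopology does.

# Simplified stand-ins; only the message formatting is illustrated here.

def _(msg):
    # identity stand-in for the i18n translation marker
    return msg


class SaharaException(Exception):
    code = "UNKNOWN_EXCEPTION"
    message = _("An unknown exception occurred")


class InvalidClusterTopology(SaharaException):
    code = "INVALID_TOPOLOGY"
    message = _("Cluster has invalid topology: {description}")

    def __init__(self, description):
        self.message = self.message.format(description=description)
        super(InvalidClusterTopology, self).__init__(self.message)


try:
    raise InvalidClusterTopology(
        _("IMPALAD must be installed on every HDFS_DATANODE"))
except InvalidClusterTopology as err:
    print(err.message)
    # -> Cluster has invalid topology: IMPALAD must be installed on every
    #    HDFS_DATANODE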