Fix down-scaling of an Ambari cluster
NameNode(s) and ResourceManager(s) need to be restarted after down-scaling an Ambari cluster. This patch adds that functionality. Change-Id: I15c838d0e1f056dda8e468d5e2012ba6941b9782 closes-bug: 1573517
This commit is contained in:
parent
eb82b17371
commit
e718b28b0b
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
fixes:
|
||||||
|
- Fixed incorrect down scaling of an Ambari cluster
|
@ -194,6 +194,19 @@ class AmbariClient(object):
|
|||||||
|
|
||||||
self.wait_ambari_request(self.req_id(resp), cluster_name)
|
self.wait_ambari_request(self.req_id(resp), cluster_name)
|
||||||
|
|
||||||
|
def restart_namenode(self, cluster_name, instance):
    """Ask Ambari to restart the NameNode running on *instance*.

    Posts a restart request to the cluster's requests endpoint and
    blocks until Ambari reports the request as finished.
    """
    requests_url = "%s/clusters/%s/requests" % (self._base_url,
                                                cluster_name)
    request_body = d_helper.build_namenode_restart_request(cluster_name,
                                                           instance)
    response = self.post(requests_url, data=jsonutils.dumps(request_body))
    # Block until the restart request completes on the Ambari side.
    self.wait_ambari_request(self.req_id(response), cluster_name)
|
||||||
|
|
||||||
|
def restart_resourcemanager(self, cluster_name, instance):
    """Ask Ambari to restart the ResourceManager running on *instance*.

    Posts a restart request to the cluster's requests endpoint and
    blocks until Ambari reports the request as finished.
    """
    requests_url = "%s/clusters/%s/requests" % (self._base_url,
                                                cluster_name)
    request_body = d_helper.build_resourcemanager_restart_request(
        cluster_name, instance)
    response = self.post(requests_url, data=jsonutils.dumps(request_body))
    # Block until the restart request completes on the Ambari side.
    self.wait_ambari_request(self.req_id(response), cluster_name)
|
||||||
|
|
||||||
def delete_host(self, cluster_name, instance):
|
def delete_host(self, cluster_name, instance):
|
||||||
url = self._base_url + "/clusters/%s/hosts/%s" % (cluster_name,
|
url = self._base_url + "/clusters/%s/hosts/%s" % (cluster_name,
|
||||||
instance.fqdn())
|
instance.fqdn())
|
||||||
|
@ -345,6 +345,32 @@ def decommission_datanodes(cluster, instances):
|
|||||||
client.decommission_datanodes(cluster.name, instances)
|
client.decommission_datanodes(cluster.name, instances)
|
||||||
|
|
||||||
|
|
||||||
|
def restart_namenode(cluster, instance):
    """Restart the NameNode process on the given cluster instance."""
    ambari_server = plugin_utils.get_instance(cluster,
                                              p_common.AMBARI_SERVER)
    ambari_password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari_server,
                                    password=ambari_password) as client:
        client.restart_namenode(cluster.name, instance)
|
||||||
|
|
||||||
|
|
||||||
|
def restart_resourcemanager(cluster, instance):
    """Restart the ResourceManager process on the given cluster instance."""
    ambari_server = plugin_utils.get_instance(cluster,
                                              p_common.AMBARI_SERVER)
    ambari_password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari_server,
                                    password=ambari_password) as client:
        client.restart_resourcemanager(cluster.name, instance)
|
||||||
|
|
||||||
|
|
||||||
|
def restart_nns_and_rms(cluster):
    """Restart every NameNode, then every ResourceManager, in *cluster*.

    Needed after down-scaling so the master daemons stop tracking
    removed hosts.
    """
    for namenode in plugin_utils.get_instances(cluster, p_common.NAMENODE):
        restart_namenode(cluster, namenode)

    for resourcemanager in plugin_utils.get_instances(
            cluster, p_common.RESOURCEMANAGER):
        restart_resourcemanager(cluster, resourcemanager)
|
||||||
|
|
||||||
|
|
||||||
def remove_services_from_hosts(cluster, instances):
|
def remove_services_from_hosts(cluster, instances):
|
||||||
for inst in instances:
|
for inst in instances:
|
||||||
LOG.debug("Stopping and removing processes from host %s" % inst.fqdn())
|
LOG.debug("Stopping and removing processes from host %s" % inst.fqdn())
|
||||||
|
@ -187,6 +187,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
|
|||||||
def decommission_nodes(self, cluster, instances):
    """Remove *instances* from the cluster during down-scaling.

    Order matters: hosts are decommissioned, their services removed,
    and only then are the master daemons restarted.
    """
    deploy.decommission_hosts(cluster, instances)
    deploy.remove_services_from_hosts(cluster, instances)
    # Restart NameNodes and ResourceManagers so they drop the removed
    # hosts (bug 1573517 — the fix this commit introduces).
    deploy.restart_nns_and_rms(cluster)
|
||||||
|
|
||||||
def validate_scaling(self, cluster, existing, additional):
|
def validate_scaling(self, cluster, existing, additional):
|
||||||
validation.validate(cluster.id)
|
validation.validate(cluster.id)
|
||||||
|
Loading…
Reference in New Issue
Block a user