[Ambari] fix Ubuntu deploy

Fix some errors that occur when deploying Ubuntu clusters with
the Ambari plugin.
Done:
1. Fixed errors when setting up ambari-agents at line 109 of
   sahara/plugins/ambari/deploy.py
2. Added "ranger-hdfs-plugin-properties" to the configs
   for Ambari version 2.4.
3. Added method "_resolve_package_conflicts" to uninstall
   packages which conflict with the Oozie packages.
4. Disabled base repos usage for Ubuntu in case of cluster
   deployment with a mirror.

co-authored-by: Michael Ionkin <mionkin@mirantis.com>
Change-Id: I57c18c4905314f4bf2b8f9fe3a1ff6035c5a9088
Closes-bug: 1567955
This commit is contained in:
Mikhail Lelyakin 2016-08-22 11:36:02 +03:00 committed by Michael Ionkin
parent 83a6a28683
commit fcc7bfe70f
3 changed files with 44 additions and 9 deletions

View File

@ -54,7 +54,8 @@ os_type_map = {
"centos6": "redhat6",
"redhat6": "redhat6",
"centos7": "redhat7",
"redhat7": "redhat7"
"redhat7": "redhat7",
"ubuntu14": "ubuntu14"
}
@ -65,6 +66,7 @@ def setup_ambari(cluster):
ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
with ambari.remote() as r:
sudo = functools.partial(r.execute_command, run_as_root=True)
sudo("rngd -r /dev/urandom -W 4096")
sudo("ambari-server setup -s -j"
" `cut -f2 -d \"=\" /etc/profile.d/99-java.sh`", timeout=1800)
redirect_file = "/tmp/%s" % uuidutils.generate_uuid()
@ -95,12 +97,15 @@ def _setup_agents(instances, manager_address):
def _disable_repos_on_inst(instance):
with context.set_current_instance_id(instance_id=instance.instance_id):
with instance.remote() as r:
tmp_name = "/tmp/yum.repos.d-%s" % instance.instance_id[:8]
sudo = functools.partial(r.execute_command, run_as_root=True)
# moving to other folder
sudo("mv /etc/yum.repos.d/ {fold_name}".format(
fold_name=tmp_name))
sudo("mkdir /etc/yum.repos.d")
if r.get_os_distrib() == "ubuntu":
sudo("mv /etc/apt/sources.list /etc/apt/sources.list.tmp")
else:
tmp_name = "/tmp/yum.repos.d-%s" % instance.instance_id[:8]
# moving to other folder
sudo("mv /etc/yum.repos.d/ {fold_name}".format(
fold_name=tmp_name))
sudo("mkdir /etc/yum.repos.d")
def disable_repos(cluster):
@ -120,9 +125,17 @@ def _setup_agent(instance, ambari_address):
sudo = functools.partial(r.execute_command, run_as_root=True)
r.replace_remote_string("/etc/ambari-agent/conf/ambari-agent.ini",
"localhost", ambari_address)
sudo("service ambari-agent start")
try:
sudo("ambari-agent start")
except Exception as e:
# workaround for ubuntu, because on ubuntu the ambari agent
# starts automatically after image boot
msg = _("Restart of ambari-agent is needed for host {}, "
"reason: {}").format(instance.fqdn(), e)
LOG.exception(msg)
sudo("ambari-agent restart")
# for correct installing packages
sudo("yum clean all")
r.update_repository()
@cpo.event_wrapper(True, step=_("Wait Ambari accessible"),
@ -142,6 +155,20 @@ def _check_port_accessible(host, port):
return False
def resolve_package_conflicts(cluster, instances=None):
    """Remove pre-installed packages that conflict with Ambari's.

    On Ubuntu hosts the stock libmysql-java package conflicts with the
    Oozie packages, so it is removed up front.  Removal failures are
    logged and ignored, since the package may simply not be installed.

    :param cluster: cluster whose instances should be cleaned up
    :param instances: optional subset of instances to process; when
        falsy, all instances of the cluster are used
    """
    instances = instances or plugin_utils.get_instances(cluster)
    for inst in instances:
        with inst.remote() as remote:
            # Only Ubuntu images ship the conflicting package.
            if remote.get_os_distrib() != 'ubuntu':
                continue
            try:
                remote.execute_command(
                    "apt-get remove -y libmysql-java", run_as_root=True)
            except Exception:
                LOG.warning(_LW("Can't remove libmysql-java, "
                                "it's probably not installed"))
def _prepare_ranger(cluster):
ranger = plugin_utils.get_instance(cluster, p_common.RANGER_ADMIN)
if not ranger:
@ -149,7 +176,6 @@ def _prepare_ranger(cluster):
ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
with ambari.remote() as r:
sudo = functools.partial(r.execute_command, run_as_root=True)
sudo("yum install -y mysql-connector-java")
sudo("ambari-server setup --jdbc-db=mysql "
"--jdbc-driver=/usr/share/java/mysql-connector-java.jar")
init_db_template = (

View File

@ -91,6 +91,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
plugin_utils.get_instances(cluster))
deploy.prepare_kerberos(cluster)
deploy.set_up_hdp_repos(cluster)
deploy.resolve_package_conflicts(cluster)
deploy.create_blueprint(cluster)
def start_cluster(self, cluster):
@ -189,6 +190,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
deploy.setup_agents(cluster, instances)
cluster = conductor.cluster_get(context.ctx(), cluster.id)
deploy.wait_host_registration(cluster, instances)
deploy.resolve_package_conflicts(cluster, instances)
deploy.add_new_hosts(cluster, instances)
deploy.manage_config_groups(cluster, instances)
deploy.manage_host_components(cluster, instances)

View File

@ -921,6 +921,13 @@
"oozie.service.ProxyUserService.proxyuser.falcon.hosts": "*",
"oozie.service.URIHandlerService.uri.handlers": "org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler",
"oozie.services.ext": "org.apache.oozie.service.JMSAccessorService,org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService"
},
"ranger-hdfs-plugin-properties": {
"REPOSITORY_CONFIG_USERNAME": "hadoop",
"common.name.for.certificate": "",
"hadoop.rpc.protection": "",
"policy_user": "ambari-qa",
"ranger-hdfs-plugin-enabled": "No"
},
"spark-defaults": {
"spark.eventLog.dir": "hdfs:///spark-history",