Merge "Refactor service home dir owner setting"

Jenkins
2016-08-31 17:45:26 +00:00
committed by Gerrit Code Review
7 changed files with 23 additions and 45 deletions
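In short: each MapR service previously carried its own copy of the "chown -R mapr:mapr <service dir>" step, in slightly different forms (inline cmd.chown loops in Drill, HttpFS, Impala and Sqoop2; private set_owner/_set_owner helpers with hard-coded /opt/mapr paths in Hue and Oozie). This change hoists that logic into a single Service._set_service_dir_owner helper in the plugin's service base class and rewires the six service modules to call it. The file paths in the headers below are inferred from the imports and class names visible in each hunk.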

sahara/plugins/mapr/domain/service.py

@@ -13,18 +13,22 @@
 # under the License.

 from oslo_log import log as logging
 from oslo_serialization import jsonutils as json
 import six

 import sahara.exceptions as e
 from sahara.i18n import _
 import sahara.plugins.exceptions as ex
+from sahara.plugins.mapr.util import commands as cmd
 from sahara.plugins.mapr.util import event_log as el
 from sahara.plugins.mapr.util import general as g
 from sahara.plugins.mapr.util import service_utils as su
 import sahara.plugins.provisioning as p
 from sahara.utils import files as files

 LOG = logging.getLogger(__name__)

 SERVICE_UI = 'Web UI'
 _INSTALL_PACKAGES_TIMEOUT = 3600
@@ -111,6 +115,13 @@ class Service(object):
         return result

+    def _set_service_dir_owner(self, cluster_context, instances):
+        service_instances = cluster_context.filter_instances(instances,
+                                                             service=self)
+        LOG.debug("Changing %s service dir owner" % self.ui_name)
+        for instance in service_instances:
+            cmd.chown(instance, 'mapr:mapr', self.service_dir(cluster_context))
+
     def post_install(self, cluster_context, instances):
         pass
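The new helper delegates the actual ownership change to cmd.chown from sahara.plugins.mapr.util.commands, a module that is not part of this diff. A minimal sketch of the assumed wrapper, built on the same instance.remote()/execute_command API that the hue.py hunks below use directly, would be:

    # Assumed shape of sahara.plugins.mapr.util.commands.chown -- a sketch
    # for orientation, not the verbatim sahara implementation.
    def chown(instance, owner, path):
        # recursively hand the directory tree to the given owner
        with instance.remote() as r:
            r.execute_command('chown -R %s %s' % (owner, path),
                              run_as_root=True)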

sahara/plugins/mapr/services/drill/drill.py

@@ -42,8 +42,8 @@ class Drill(s.Service):
     def post_start(self, cluster_context, instances):
         instances = instances or cluster_context.get_instances(DRILL)
         super(Drill, self).install(cluster_context, instances)
+        self._set_service_dir_owner(cluster_context, instances)
         for instance in instances:
-            cmd.chown(instance, 'mapr:mapr', self.service_dir(cluster_context))
             cmd.re_configure_sh(instance, cluster_context)
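Note that Drill performs its real installation in post_start rather than install (hence the super(Drill, self).install(...) call above), so the ownership fix still runs only once the cluster is up; the refactoring drops the inline cmd.chown but leaves the per-instance re_configure_sh loop untouched.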

sahara/plugins/mapr/services/httpfs/httpfs.py

@@ -15,7 +15,6 @@
 import sahara.plugins.mapr.domain.node_process as np
 import sahara.plugins.mapr.domain.service as s
-import sahara.plugins.mapr.util.commands as cmd
 import sahara.plugins.mapr.util.validation_utils as vu
@@ -38,5 +37,4 @@ class HttpFS(s.Service):
         self._validation_rules = [vu.exactly(1, HTTP_FS)]

     def post_install(self, cluster_context, instances):
-        instance = cluster_context.get_instance(HTTP_FS)
-        cmd.chown(instance, 'mapr:mapr', self.service_dir(cluster_context))
+        self._set_service_dir_owner(cluster_context, instances)

sahara/plugins/mapr/services/hue/hue.py

@@ -166,9 +166,7 @@ class Hue(s.Service):
             })

         livy_host = context.get_instance(HUE_LIVY)
-        spark_instance = context.get_instance(
-            spark.SPARK_HISTORY_SERVER)
-        if livy_host and spark_instance:
+        if livy_host:
             result.update({
                 'livy_host': livy_host.internal_ip
             })
@@ -188,31 +186,12 @@ class Hue(s.Service):
             }
             remote.execute_command(cmd % args, run_as_root=True, timeout=600)

-        def set_owner(remote):
-            remote.execute_command('chown -R mapr:mapr /opt/mapr/hue',
-                                   run_as_root=True)
-
-        if hue_instance:
-            with hue_instance.remote() as r:
-                LOG.debug("Executing Hue database migration")
-                migrate_database(r, cluster_context)
-                LOG.debug("Changing Hue home dir owner")
-                set_owner(r)
-            self._copy_hive_configs(cluster_context, hue_instance)
-            self._install_jt_plugin(cluster_context, hue_instance)
-
-        hue_livy_instance = cluster_context.get_instance(HUE_LIVY)
-        if hue_livy_instance:
-            with hue_livy_instance.remote() as r:
-                LOG.debug("Changing Hue home dir owner")
-                set_owner(r)
-
-    def _set_hue_sh_chmod(self, cluster_context):
-        cmd = 'chmod 777 %s' % (self.home_dir(cluster_context) + '/bin/hue.sh')
-        hue_instance = cluster_context.get_instance(HUE)
-        if hue_instance:
-            with hue_instance.remote() as r:
-                r.execute_command(cmd, run_as_root=True)
+        with hue_instance.remote() as r:
+            LOG.debug("Executing Hue database migration")
+            migrate_database(r, cluster_context)
+        self._copy_hive_configs(cluster_context, hue_instance)
+        self._install_jt_plugin(cluster_context, hue_instance)
+        self._set_service_dir_owner(cluster_context, instances)

     def _copy_hive_configs(self, cluster_context, hue_instance):
         hive_server = cluster_context.get_instance(hive.HIVE_SERVER_2)
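Two behavioural details in this hunk: the explicit chown of /opt/mapr/hue on both the Hue and Hue Livy nodes collapses into the single _set_service_dir_owner call, which presumably still covers the Livy nodes because the helper's filter_instances(instances, service=self) selects by service rather than by a single node process; and the old "if hue_instance:" guard appears to be gone, so post_install now assumes a Hue instance is present.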

sahara/plugins/mapr/services/impala/impala.py

@@ -17,7 +17,6 @@ import sahara.plugins.mapr.domain.configuration_file as bcf
 import sahara.plugins.mapr.domain.node_process as np
 import sahara.plugins.mapr.domain.service as s
 import sahara.plugins.mapr.services.hive.hive as hive
-import sahara.plugins.mapr.util.commands as cmd
 import sahara.plugins.mapr.util.maprfs_helper as mfs
 import sahara.plugins.mapr.util.validation_utils as vu
 import sahara.utils.files as files
@@ -91,9 +90,7 @@ class Impala(s.Service):
         return [impala_env]

     def post_install(self, cluster_context, instances):
-        impalas = cluster_context.filter_instances(instances, IMPALA_SERVER)
-        for instance in impalas:
-            cmd.chown(instance, 'mapr:mapr', self.service_dir(cluster_context))
+        self._set_service_dir_owner(cluster_context, instances)


 class ImpalaV141(Impala):

sahara/plugins/mapr/services/oozie/oozie.py

@@ -81,10 +81,6 @@ class Oozie(s.Service):
         }
         return jdbc_uri % jdbc_args

-    def _set_owner(self, remote):
-        remote.execute_command('chown -R mapr:mapr /opt/mapr/oozie',
-                               run_as_root=True)
-
     def install(self, cluster_context, instances):
         # oozie requires executed configure.sh
         pass
@@ -112,7 +108,7 @@ class Oozie(s.Service):
             LOG.debug('Installing MySQL connector for Oozie')
             r.execute_command(symlink_cmd, run_as_root=True,
                               raise_when_error=False)
-            self._set_owner(r)
+        self._set_service_dir_owner(cluster_context, instances)

     def post_start(self, cluster_context, instances):
         instances = cluster_context.filter_instances(instances, OOZIE)
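Besides deduplication, this removes Oozie's hard-coded /opt/mapr/oozie path: the shared helper derives the target directory from self.service_dir(cluster_context) instead.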

sahara/plugins/mapr/services/sqoop/sqoop2.py

@@ -15,7 +15,6 @@
 import sahara.plugins.mapr.domain.node_process as np
 import sahara.plugins.mapr.domain.service as s
-import sahara.plugins.mapr.util.commands as cmd
 import sahara.plugins.mapr.util.validation_utils as vu
@@ -44,6 +43,4 @@ class Sqoop2(s.Service):
         ]

     def post_install(self, context, instances):
-        sqoop_servers = context.filter_instances(instances, SQOOP_2_SERVER)
-        for instance in sqoop_servers:
-            cmd.chown(instance, 'mapr:mapr', self.service_dir(context))
+        self._set_service_dir_owner(context, instances)
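With the helper in the base class, a service's ownership fix is now a one-liner. A sketch of the resulting pattern for a hypothetical new service (MyService is an illustrative name, not part of this commit):

    import sahara.plugins.mapr.domain.service as s

    class MyService(s.Service):
        def post_install(self, cluster_context, instances):
            # delegate the recursive chown to the shared base-class helper
            self._set_service_dir_owner(cluster_context, instances)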