Create an option for Spark path
Remove the hard-coded path to the Spark installation and move it to an
option. The default is '/opt/spark', where Spark is installed by the DIB.

Co-Authored-By: Trevor McKay <tmckay@redhat.com>
Change-Id: I428f0148e893695a2a225006f0fe5c8bbb9b972f
commit 8401903ab7
parent 159aea00c7
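The change is mechanical: every hard-coded '/opt/spark' below is replaced by a lookup of the new 'Spark home' option. For orientation, a deployer could override the default when defining a cluster roughly like this (a minimal sketch; only the 'Spark' group and 'Spark home' option names come from this patch, the surrounding payload shape is assumed):

# Hypothetical cluster-level override of the new option; anything other
# than the "Spark" / "Spark home" names is illustrative.
cluster_configs = {
    "Spark": {
        "Spark home": "/usr/local/spark",  # default stays /opt/spark
    }
}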
@@ -89,6 +89,13 @@ SPARK_CONFS = {
                        ' machine (default: 1)',
         'default': '1',
         'priority': 2,
     },
+    {
+        'name': 'Spark home',
+        'description': 'The location of the spark installation'
+                       ' (default: /opt/spark)',
+        'default': '/opt/spark',
+        'priority': 2,
+    }
 ]
 }
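The new entry sits alongside the other priority-2 options and is resolved at runtime through the plugin's config helper (c_helper.get_config_value, used by the _spark_home() method added below). A simplified stand-in for that lookup, assuming the options are kept in a list keyed by service name as the hunk above suggests, would be roughly:

def get_config_value(service, name, cluster):
    # Simplified sketch, not the real Sahara helper: prefer a value the
    # user set on the cluster, otherwise fall back to the default declared
    # in SPARK_CONFS (e.g. '/opt/spark' for 'Spark home').
    user_confs = cluster.cluster_configs.get(service, {})
    if name in user_confs:
        return user_confs[name]
    for option in SPARK_CONFS[service]:
        if option['name'] == name:
            return option['default']
    raise RuntimeError("Unknown config: %s/%s" % (service, name))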
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
+
 from oslo.config import cfg
 
 from sahara import conductor
@@ -115,13 +117,16 @@ class SparkProvider(p.ProvisioningPluginBase):
         # start spark nodes
         if sm_instance:
             with remote.get_remote(sm_instance) as r:
-                run.start_spark_master(r)
+                run.start_spark_master(r, self._spark_home(cluster))
                 LOG.info("Spark service at '%s' has been started",
                          sm_instance.hostname())
 
         LOG.info('Cluster %s has been started successfully' % cluster.name)
         self._set_cluster_info(cluster)
 
+    def _spark_home(self, cluster):
+        return c_helper.get_config_value("Spark", "Spark home", cluster)
+
     def _extract_configs_to_extra(self, cluster):
         nn = utils.get_instance(cluster, "namenode")
         sp_master = utils.get_instance(cluster, "master")
@@ -204,9 +209,10 @@ class SparkProvider(p.ProvisioningPluginBase):
             '/etc/hadoop/conf/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
         }
 
+        sp_home = self._spark_home(cluster)
         files_spark = {
-            '/opt/spark/conf/spark-env.sh': ng_extra['sp_master'],
-            '/opt/spark/conf/slaves': ng_extra['sp_slaves']
+            os.path.join(sp_home, 'conf/spark-env.sh'): ng_extra['sp_master'],
+            os.path.join(sp_home, 'conf/slaves'): ng_extra['sp_slaves']
         }
 
         files_init = {
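With the home directory coming from configuration, the target paths are assembled with os.path.join instead of being written out. For a non-default home the same code now yields, for example (values illustrative):

import os

sp_home = '/usr/local/spark'  # whatever 'Spark home' resolves to
os.path.join(sp_home, 'conf/spark-env.sh')  # -> '/usr/local/spark/conf/spark-env.sh'
os.path.join(sp_home, 'conf/slaves')        # -> '/usr/local/spark/conf/slaves'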
@@ -238,7 +244,7 @@ class SparkProvider(p.ProvisioningPluginBase):
                 'sudo chown -R $USER:$USER /etc/hadoop'
             )
             r.execute_command(
-                'sudo chown -R $USER:$USER /opt/spark'
+                'sudo chown -R $USER:$USER %s' % sp_home
             )
             r.write_files_to(files_hadoop)
             r.write_files_to(files_spark)
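The ownership fix-up follows the same pattern: the directory is interpolated into the command rather than hard-coded, so with the default value the executed command is unchanged (illustrative):

sp_home = '/opt/spark'  # the default
'sudo chown -R $USER:$USER %s' % sp_home  # -> 'sudo chown -R $USER:$USER /opt/spark'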
@@ -274,9 +280,11 @@ class SparkProvider(p.ProvisioningPluginBase):
 
             if need_update_spark:
                 ng_extra = extra[instance.node_group.id]
+                sp_home = self._spark_home(cluster)
                 files = {
-                    '/opt/spark/conf/spark-env.sh': ng_extra['sp_master'],
-                    '/opt/spark/conf/slaves': ng_extra['sp_slaves'],
+                    os.path.join(sp_home,
+                                 'conf/spark-env.sh'): ng_extra['sp_master'],
+                    os.path.join(sp_home, 'conf/slaves'): ng_extra['sp_slaves'],
                 }
                 r = remote.get_remote(instance)
                 r.write_files_to(files)
@@ -357,14 +365,14 @@ class SparkProvider(p.ProvisioningPluginBase):
         master = utils.get_instance(cluster, "master")
         r_master = remote.get_remote(master)
 
-        run.stop_spark(r_master)
+        run.stop_spark(r_master, self._spark_home(cluster))
 
         self._setup_instances(cluster, instances)
         nn = utils.get_instance(cluster, "namenode")
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
         self._start_slave_datanode_processes(instances)
 
-        run.start_spark_master(r_master)
+        run.start_spark_master(r_master, self._spark_home(cluster))
         LOG.info("Spark master service at '%s' has been restarted",
                  master.hostname())
 
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
+
 from sahara.openstack.common import log as logging
 
 
@@ -45,13 +47,11 @@ def clean_port_hadoop(nn_remote):
                               "| xargs sudo kill -9"))
 
 
-def start_spark_master(nn_remote):
-    nn_remote.execute_command("bash /opt/spark/sbin/start-all.sh")
+def start_spark_master(nn_remote, sp_home):
+    nn_remote.execute_command("bash " + os.path.join(sp_home,
+                                                     "sbin/start-all.sh"))
 
 
-def start_spark_slaves(nn_remote):
-    nn_remote.execute_command("bash /opt/spark/sbin/start-slaves.sh")
-
-
-def stop_spark(nn_remote):
-    nn_remote.execute_command("bash /opt/spark/sbin/stop-all.sh")
+def stop_spark(nn_remote, sp_home):
+    nn_remote.execute_command("bash " + os.path.join(sp_home,
+                                                     "sbin/stop-all.sh"))
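In run_scripts the start/stop helpers now take the home directory as an argument and build the shell command at call time, so the default keeps the previous behaviour exactly. A quick check of the strings that end up being executed (illustrative):

import os

sp_home = '/opt/spark'  # default; any configured value works the same way
"bash " + os.path.join(sp_home, "sbin/start-all.sh")  # -> 'bash /opt/spark/sbin/start-all.sh'
"bash " + os.path.join(sp_home, "sbin/stop-all.sh")   # -> 'bash /opt/spark/sbin/stop-all.sh'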
@@ -35,11 +35,13 @@ def decommission_sl(master, inst_to_be_deleted, survived_inst):
     else:
         slaves_content = "\n"
 
+    cluster = master.node_group.cluster
+    sp_home = c_helper.get_config_value("Spark", "Spark home", cluster)
     r_master = remote.get_remote(master)
-    run.stop_spark(r_master)
+    run.stop_spark(r_master, sp_home)
 
     # write new slave file to master
-    files = {'/opt/spark/conf/slaves': slaves_content}
+    files = {os.path.join(sp_home, 'conf/slaves'): slaves_content}
     r_master.write_files_to(files)
 
     # write new slaves file to each survived slave as well
@@ -47,7 +49,7 @@ def decommission_sl(master, inst_to_be_deleted, survived_inst):
         with remote.get_remote(i) as r:
             r.write_files_to(files)
 
-    run.start_spark_master(r_master)
+    run.start_spark_master(r_master, sp_home)
 
 
 def decommission_dn(nn, inst_to_be_deleted, survived_inst):