Add translation support to plugin modules

Changed modules:
* sahara/plugins/base.py
* sahara/plugins/general/exceptions.py
* sahara/plugins/general/utils.py
* sahara/plugins/hdp/ambariplugin.py
* sahara/plugins/hdp/clusterspec.py
* sahara/plugins/hdp/configprovider.py
* sahara/plugins/hdp/hadoopserver.py
* sahara/plugins/hdp/versions/version_1_3_2/services.py
* sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py
* sahara/plugins/hdp/versions/version_2_0_6/services.py
* sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py
* sahara/plugins/provisioning.py
* sahara/plugins/spark/config_helper.py
* sahara/plugins/spark/plugin.py
* sahara/plugins/spark/scaling.py
* sahara/plugins/vanilla/hadoop2/config.py
* sahara/plugins/vanilla/hadoop2/config_helper.py
* sahara/plugins/vanilla/hadoop2/run_scripts.py
* sahara/plugins/vanilla/hadoop2/scaling.py
* sahara/plugins/vanilla/hadoop2/validation.py
* sahara/plugins/vanilla/plugin.py
* sahara/plugins/vanilla/v1_2_1/config_helper.py
* sahara/plugins/vanilla/v1_2_1/scaling.py
* sahara/plugins/vanilla/v1_2_1/versionhandler.py

Change-Id: Ia3dacd7ce5a77e8b51747d9c8e1a17fab266395e
Author: Andrey Pavlov, 2014-07-17 17:57:16 +04:00
Commit: 6bc8ce78de
Parent: 13647cd70a
26 changed files with 391 additions and 297 deletions
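
The pattern applied throughout is the oslo-style i18n convention: exception text that reaches users is wrapped with _(), log messages get the level-specific lazy markers (_LI, _LW, _LE, _LC), and positional %s/{0} substitutions become named placeholders so translators can reorder them. A minimal sketch of the convention, based on the base.py hunk below (register_plugin and RuntimeError are illustrative stand-ins; the real code lives in PluginManager and raises ex.ConfigurationError):

    from sahara.i18n import _
    from sahara.i18n import _LI
    from sahara.openstack.common import log as logging

    LOG = logging.getLogger(__name__)


    def register_plugin(plugins, name, obj, entry_point_target):
        if name in plugins:
            # User-facing exception text is wrapped with _() so it can be
            # translated; interpolation happens after translation.
            raise RuntimeError(
                _("Plugin with name '%s' already exists.") % name)
        plugins[name] = obj
        # Log messages use a level marker (_LI for info) and pass the
        # substitution dict to the logger instead of pre-formatting the
        # string, so translation and interpolation happen lazily.
        LOG.info(_LI("Plugin '%(plugin_name)s' loaded %(entry_point)s"),
                 {'plugin_name': name, 'entry_point': entry_point_target})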

sahara/plugins/base.py

@@ -20,6 +20,8 @@ import six
 from stevedore import enabled
 from sahara import exceptions as ex
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.utils import resources
@@ -98,17 +100,18 @@ class PluginManager(object):
 for ext in extension_manager.extensions:
 if ext.name in self.plugins:
 raise ex.ConfigurationError(
-"Plugin with name '%s' already exists." % ext.name)
+_("Plugin with name '%s' already exists.") % ext.name)
 ext.obj.name = ext.name
 self.plugins[ext.name] = ext.obj
-LOG.info("Plugin '%s' loaded (%s)"
-% (ext.name, ext.entry_point_target))
+LOG.info(_LI("Plugin '%(plugin_name)s' loaded %(entry_point)s"),
+{'plugin_name': ext.name,
+'entry_point': ext.entry_point_target})
 if len(self.plugins) < len(config_plugins):
 loaded_plugins = set(six.iterkeys(self.plugins))
 requested_plugins = set(config_plugins)
 raise ex.ConfigurationError(
-"Plugins couldn't be loaded: %s" %
+_("Plugins couldn't be loaded: %s") %
 ", ".join(requested_plugins - loaded_plugins))
 def get_plugins(self, base):

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from sahara.i18n import _
 from sahara.plugins import provisioning as p
@@ -22,7 +23,7 @@ class FakePluginProvider(p.ProvisioningPluginBase):
 return "Fake Plugin"
 def get_description(self):
-return ("It's a fake plugin that aimed to work on the CirrOS images. "
+return _("It's a fake plugin that aimed to work on the CirrOS images. "
 "It doesn't install Hadoop. It's needed to be able to test "
 "provisioning part of Sahara codebase itself.")

sahara/plugins/general/exceptions.py

@@ -14,18 +14,19 @@
 # limitations under the License.
 import sahara.exceptions as e
+from sahara.i18n import _
 class NodeGroupCannotBeScaled(e.SaharaException):
 def __init__(self, ng_name, reason):
-self.message = ("Chosen node group %s cannot be scaled : "
-"%s" % (ng_name, reason))
+self.message = _("Chosen node group %(ng_name)s cannot be scaled : "
+"%(reason)s") % {"ng_name": ng_name, "reason": reason}
 self.code = "NODE_GROUP_CANNOT_BE_SCALED"
 class DecommissionError(e.SaharaException):
 code = "DECOMMISSION_ERROR"
-message = "Failed to decommission cluster"
+message = _("Failed to decommission cluster")
 def __init__(self, message=None):
 if message:
@@ -34,8 +35,9 @@ class DecommissionError(e.SaharaException):
 class ClusterCannotBeScaled(e.SaharaException):
 def __init__(self, cluster_name, reason):
-self.message = ("Cluster %s cannot be scaled : "
-"%s" % (cluster_name, reason))
+self.message = _("Cluster %(cluster_name)s cannot be scaled : "
+"%(reason)s") % {"cluster_name": cluster_name,
+"reason": reason}
 self.code = "CLUSTER_CANNOT_BE_SCALED"
@@ -43,11 +45,13 @@ class RequiredServiceMissingException(e.SaharaException):
 """Exception indicating that a required service has not been deployed."""
 def __init__(self, service_name, required_by=None):
-self.message = ('Cluster is missing a service: %s'
+self.message = (_('Cluster is missing a service: %s')
 % service_name)
 if required_by:
-self.message = ('%s, required by service: %s'
-% (self.message, required_by))
+self.message = (_('%(message)s, required by service: '
+'%(required_by)s')
+% {'message': self.message,
+'required_by': required_by})
 self.code = 'MISSING_SERVICE'
@@ -62,11 +66,17 @@ class InvalidComponentCountException(e.SaharaException):
 """
 def __init__(self, component, expected_count, count, description=None):
-self.message = ("Hadoop cluster should contain {0} {1} component(s)."
-" Actual {1} count is {2}".format(
-expected_count, component, count))
+message = _("Hadoop cluster should contain %(expected_count)s "
+"%(component)s component(s)."
+" Actual %(component)s count is %(count)s")
 if description:
-self.message += '. ' + description
+message = ("%(message)s. %(description)s"
+% {'message': message, 'description': description})
+self.message = message % {"expected_count": expected_count,
+"component": component, "count": count}
 self.code = "INVALID_COMPONENT_COUNT"
 super(InvalidComponentCountException, self).__init__()
@@ -78,7 +88,7 @@ class HadoopProvisionError(e.SaharaException):
 A message indicating the reason for failure must be provided.
 """
-base_message = "Failed to Provision Hadoop Cluster: %s"
+base_message = _("Failed to Provision Hadoop Cluster: %s")
 def __init__(self, message):
 self.code = "HADOOP_PROVISION_FAILED"

sahara/plugins/general/utils.py

@@ -16,6 +16,7 @@
 from oslo.utils import netutils
 from six.moves.urllib import parse as urlparse
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
@@ -38,7 +39,7 @@ def get_instance(cluster, node_process):
 instances = get_instances(cluster, node_process)
 if len(instances) > 1:
 raise ex.InvalidComponentCountException(
-node_process, '0 or 1', len(instances))
+node_process, _('0 or 1'), len(instances))
 return instances[0] if instances else None

sahara/plugins/hdp/ambariplugin.py

@@ -17,6 +17,8 @@ from oslo.config import cfg
 from sahara import conductor
 from sahara import context
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
@@ -64,7 +66,7 @@ class AmbariPlugin(p.ProvisioningPluginBase):
 # enabled
 self._configure_topology_for_cluster(cluster, servers)
-LOG.info("Install of Hadoop stack successful.")
+LOG.info(_LI("Install of Hadoop stack successful."))
 # add service urls
 self._set_cluster_info(cluster, cluster_spec)
@@ -171,8 +173,8 @@ class AmbariPlugin(p.ProvisioningPluginBase):
 servers, version):
 # TODO(jspeidel): encapsulate in another class
-LOG.info('Provisioning Cluster via Ambari Server: {0} ...'.format(
-ambari_info.get_address()))
+LOG.info(_LI('Provisioning Cluster via Ambari Server: {0} ...')
+.format(ambari_info.get_address()))
 for server in servers:
 self._spawn(
@@ -221,9 +223,9 @@ class AmbariPlugin(p.ProvisioningPluginBase):
 if not is_admin_provided:
 if admin_user is None:
-raise ex.HadoopProvisionError("An Ambari user in the "
-"admin group must be "
-"configured.")
+raise ex.HadoopProvisionError(_("An Ambari user in the"
+" admin group must be "
+"configured."))
 ambari_info.user = admin_user
 ambari_info.password = admin_password
 ambari_client.delete_ambari_user('admin', ambari_info)
@@ -240,7 +242,7 @@ class AmbariPlugin(p.ProvisioningPluginBase):
 ambari_info.user = admin_user.name
 ambari_info.password = admin_user.password
-LOG.info('Using "{0}" as admin user for scaling of cluster'
+LOG.info(_LI('Using "{0}" as admin user for scaling of cluster')
 .format(ambari_info.user))
 # PLUGIN SPI METHODS:
@@ -276,7 +278,7 @@ class AmbariPlugin(p.ProvisioningPluginBase):
 return 'Hortonworks Data Platform'
 def get_description(self):
-return ('The Hortonworks OpenStack plugin works with project '
+return _('The Hortonworks OpenStack plugin works with project '
 'Sahara to automate the deployment of the Hortonworks data'
 ' platform on OpenStack based public & private clouds')
@@ -323,8 +325,8 @@ class AmbariPlugin(p.ProvisioningPluginBase):
 ambari_client.cleanup(ambari_info)
 def decommission_nodes(self, cluster, instances):
-LOG.info('AmbariPlugin: decommission_nodes called for '
-'HDP version = ' + cluster.hadoop_version)
+LOG.info(_LI('AmbariPlugin: decommission_nodes called for '
+'HDP version = %s'), cluster.hadoop_version)
 handler = self.version_factory.get_version_handler(
 cluster.hadoop_version)

sahara/plugins/hdp/clusterspec.py

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from sahara.i18n import _
 from sahara.openstack.common import jsonutils as json
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
@@ -312,9 +313,9 @@ class NormalizedClusterConfig():
 return 'boolean'
 else:
 raise ValueError(
-"Could not determine property type for property '{0}' with "
-"value: {1}".
-format(prop, value))
+_("Could not determine property type for property "
+"'%(property)s' with value: %(value)s") %
+{"property": prop, "value": value})
 class NormalizedConfig():

sahara/plugins/hdp/configprovider.py

@@ -14,6 +14,7 @@
 # limitations under the License.
 from sahara import exceptions
+from sahara.i18n import _
 from sahara.plugins import provisioning as p
@@ -61,8 +62,8 @@ class ConfigurationProvider:
 # internal error
 # ambari-config-resource contains duplicates
 raise exceptions.InvalidDataException(
-'Internal Error. Duplicate property '
-'name detected: %s' % property_name)
+_('Internal Error. Duplicate property '
+'name detected: %s') % property_name)
 self.config_mapper[service_property['name']] = (
 self._get_target(
 service_property['applicable_target']))

sahara/plugins/hdp/hadoopserver.py

@@ -15,6 +15,8 @@
 import re
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.hdp import saharautils
@@ -64,7 +66,7 @@ class HadoopServer:
 @saharautils.inject_remote('r')
 def install_rpms(self, r):
 LOG.info(
-"{0}: Installing rpm's ...".format(self.instance.hostname()))
+_LI("{0}: Installing rpm's ...").format(self.instance.hostname()))
 # TODO(jspeidel): based on image type, use correct command
 curl_cmd = ('curl -f -s -o /etc/yum.repos.d/ambari.repo %s' %
@@ -76,17 +78,17 @@
 yum_cmd = 'yum -y install %s' % EPEL_RELEASE_PACKAGE_NAME
 r.execute_command(yum_cmd, run_as_root=True)
 else:
-LOG.info("{0}: Unable to install rpm's from repo, "
-"checking for local install."
+LOG.info(_LI("{0}: Unable to install rpm's from repo, "
+"checking for local install.")
 .format(self.instance.hostname()))
 if not self.rpms_installed():
 raise ex.HadoopProvisionError(
-'Failed to install Hortonworks Ambari')
+_('Failed to install Hortonworks Ambari'))
 @saharautils.inject_remote('r')
 def install_swift_integration(self, r):
 LOG.info(
-"{0}: Installing swift integration ..."
+_LI("{0}: Installing swift integration ...")
 .format(self.instance.hostname()))
 base_rpm_cmd = 'rpm -U --quiet '
 rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_RPM
@@ -94,8 +96,8 @@
 run_as_root=True,
 raise_when_error=False)
 if ret_code != 0:
-LOG.info("{0}: Unable to install swift integration from source, "
-"checking for local rpm."
+LOG.info(_LI("{0}: Unable to install swift integration from "
+"source, checking for local rpm.")
 .format(self.instance.hostname()))
 ret_code, stdout = r.execute_command(
 'ls ' + HADOOP_SWIFT_LOCAL_RPM,
@@ -106,7 +108,7 @@
 r.execute_command(rpm_cmd, run_as_root=True)
 else:
 raise ex.HadoopProvisionError(
-'Failed to install Hadoop Swift integration')
+_('Failed to install Hadoop Swift integration'))
 @saharautils.inject_remote('r')
 def configure_topology(self, topology_str, r):
@@ -121,11 +123,11 @@
 @saharautils.inject_remote('r')
 def _setup_and_start_ambari_server(self, port, jdk_path, r):
-LOG.info('{0}: Installing ambari-server ...'.format(
+LOG.info(_LI('{0}: Installing ambari-server ...').format(
 self.instance.hostname()))
 r.execute_command('yum -y install ambari-server', run_as_root=True)
-LOG.info('Running Ambari Server setup ...')
+LOG.info(_LI('Running Ambari Server setup ...'))
 # remove postgres data directory as a precaution since its existence
 # has prevented successful postgres installation
 r.execute_command('rm -rf /var/lib/pgsql/data', run_as_root=True)
@@ -151,7 +153,7 @@
 self._configure_ambari_server_api_port(port)
-LOG.info('Starting Ambari ...')
+LOG.info(_LI('Starting Ambari ...'))
 # NOTE(dmitryme): Reading stdout from 'ambari-server start'
 # hangs ssh. Redirecting output to /dev/null fixes that
 r.execute_command(
@@ -175,7 +177,7 @@
 @saharautils.inject_remote('r')
 def _setup_and_start_ambari_agent(self, ambari_server_ip, r):
-LOG.info('{0}: Installing Ambari Agent ...'.format(
+LOG.info(_LI('{0}: Installing Ambari Agent ...').format(
 self.instance.hostname()))
 r.execute_command('yum -y install ambari-agent', run_as_root=True)
@@ -187,7 +189,8 @@
 ambari_server_ip)
 LOG.info(
-'{0}: Starting Ambari Agent ...'.format(self.instance.hostname()))
+_LI('{0}: Starting Ambari Agent ...').format(
+self.instance.hostname()))
 # If the HDP 2 ambari agent is pre-installed on an image, the agent
 # will start up during instance launch and therefore the agent
 # registration will fail. It is therefore more appropriate to call

sahara/plugins/hdp/versions/version_1_3_2/services.py

@@ -20,6 +20,7 @@ import six
 from sahara import exceptions as e
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
 from sahara.swift import swift_helper as h
@@ -518,8 +519,8 @@ class HBaseService(Service):
 configurations['global']['hbase_hdfs_root_dir'] = match.group(3)
 else:
 raise e.InvalidDataException(
-"Invalid value for property 'hbase-site/hbase.rootdir' : %s" %
-user_input.value)
+_("Invalid value for property 'hbase-site/hbase.rootdir' : %s")
+% user_input.value)
 def finalize_configuration(self, cluster_spec):
 nn_servers = cluster_spec.determine_component_hosts('NAMENODE')

sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py

@@ -22,7 +22,11 @@ import requests
 from sahara import context
 from sahara import exceptions as exc
+from sahara.i18n import _
+from sahara.i18n import _LC
+from sahara.i18n import _LE
 from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.hdp import clusterspec as cs
 from sahara.plugins.hdp import configprovider as cfgprov
@@ -166,9 +170,9 @@ class AmbariClient():
 self.handler.get_version() + '"}}')
 if result.status_code != 201:
-LOG.error('Create cluster command failed. %s' % result.text)
+LOG.error(_LE('Create cluster command failed. %s') % result.text)
 raise ex.HadoopProvisionError(
-'Failed to add cluster: %s' % result.text)
+_('Failed to add cluster: %s') % result.text)
 def _add_configurations_to_cluster(
 self, cluster_spec, ambari_info, name):
@@ -214,10 +218,10 @@ class AmbariClient():
 result = self._put(config_url, ambari_info, data=json.dumps(body))
 if result.status_code != 200:
 LOG.error(
-'Set configuration command failed. {0}'.format(
+_LE('Set configuration command failed. {0}').format(
 result.text))
 raise ex.HadoopProvisionError(
-'Failed to set configurations on cluster: %s'
+_('Failed to set configurations on cluster: %s')
 % result.text)
 def _add_services_to_cluster(self, cluster_spec, ambari_info, name):
@@ -230,10 +234,11 @@ class AmbariClient():
 ambari_info)
 if result.status_code not in [201, 409]:
 LOG.error(
-'Create service command failed. {0}'.format(
+_LE('Create service command failed. {0}').format(
 result.text))
 raise ex.HadoopProvisionError(
-'Failed to add services to cluster: %s' % result.text)
+_('Failed to add services to cluster: %s')
+% result.text)
 def _add_components_to_services(self, cluster_spec, ambari_info, name):
 add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
@@ -247,10 +252,10 @@ class AmbariClient():
 ambari_info)
 if result.status_code not in [201, 409]:
 LOG.error(
-'Create component command failed. {0}'.format(
+_LE('Create component command failed. {0}').format(
 result.text))
 raise ex.HadoopProvisionError(
-'Failed to add components to services: %s'
+_('Failed to add components to services: %s')
 % result.text)
 def _add_hosts_and_components(
@@ -266,9 +271,9 @@ class AmbariClient():
 ambari_info)
 if result.status_code != 201:
 LOG.error(
-'Create host command failed. {0}'.format(result.text))
+_LE('Create host command failed. {0}').format(result.text))
 raise ex.HadoopProvisionError(
-'Failed to add host: %s' % result.text)
+_('Failed to add host: %s') % result.text)
 node_group_name = host.node_group.name
 # TODO(jspeidel): ensure that node group exists
@@ -281,13 +286,14 @@ class AmbariClient():
 ambari_info)
 if result.status_code != 201:
 LOG.error(
-'Create host_component command failed. %s' %
+_LE('Create host_component command failed. %s'),
 result.text)
 raise ex.HadoopProvisionError(
-'Failed to add host component: %s' % result.text)
+_('Failed to add host component: %s')
+% result.text)
 def _install_services(self, cluster_name, ambari_info):
-LOG.info('Installing required Hadoop services ...')
+LOG.info(_LI('Installing required Hadoop services ...'))
 ambari_address = ambari_info.get_address()
 install_url = ('http://{0}/api/v1/clusters/{'
@@ -305,17 +311,17 @@ class AmbariClient():
 ambari_info, cluster_name, request_id),
 ambari_info)
 if success:
-LOG.info("Install of Hadoop stack successful.")
+LOG.info(_LI("Install of Hadoop stack successful."))
 self._finalize_ambari_state(ambari_info)
 else:
-LOG.critical('Install command failed.')
+LOG.critical(_LC('Install command failed.'))
 raise ex.HadoopProvisionError(
-'Installation of Hadoop stack failed.')
+_('Installation of Hadoop stack failed.'))
 elif result.status_code != 200:
 LOG.error(
-'Install command failed. {0}'.format(result.text))
+_LE('Install command failed. {0}').format(result.text))
 raise ex.HadoopProvisionError(
-'Installation of Hadoop stack failed.')
+_('Installation of Hadoop stack failed.'))
 def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
 return ('http://{0}/api/v1/clusters/{1}/requests/{'
@@ -343,7 +349,7 @@ class AmbariClient():
 return started
 def _finalize_ambari_state(self, ambari_info):
-LOG.info('Finalizing Ambari cluster state.')
+LOG.info(_LI('Finalizing Ambari cluster state.'))
 persist_state_uri = 'http://{0}/api/v1/persist'.format(
 ambari_info.get_address())
@@ -354,14 +360,17 @@ class AmbariClient():
 result = self._post(persist_state_uri, ambari_info, data=persist_data)
 if result.status_code != 201 and result.status_code != 202:
-LOG.warning('Finalizing of Ambari cluster state failed. {0}'.
+LOG.warning(_LW('Finalizing of Ambari cluster state failed. {0}').
 format(result.text))
-raise ex.HadoopProvisionError('Unable to finalize Ambari state.')
+raise ex.HadoopProvisionError(_('Unable to finalize Ambari '
+'state.'))
 def start_services(self, cluster_name, cluster_spec, ambari_info):
-LOG.info('Starting Hadoop services ...')
-LOG.info('Cluster name: {0}, Ambari server address: {1}'
-.format(cluster_name, ambari_info.get_address()))
+LOG.info(_LI('Starting Hadoop services ...'))
+LOG.info(_LI('Cluster name: %(cluster_name)s, Ambari server address: '
+'%(server_address)s'),
+{'cluster_name': cluster_name,
+'server_address': ambari_info.get_address()})
 start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
 'state=INSTALLED'.format(
 ambari_info.get_address(), cluster_name))
@@ -379,19 +388,20 @@ class AmbariClient():
 request_id), ambari_info)
 if success:
 LOG.info(
-"Successfully started Hadoop cluster '{0}'.".format(
+_LI("Successfully started Hadoop cluster '{0}'.").format(
 cluster_name))
 else:
-LOG.critical('Failed to start Hadoop cluster.')
+LOG.critical(_LC('Failed to start Hadoop cluster.'))
 raise ex.HadoopProvisionError(
-'Start of Hadoop services failed.')
+_('Start of Hadoop services failed.'))
 elif result.status_code != 200:
 LOG.error(
-'Start command failed. Status: {0}, response: {1}'.
-format(result.status_code, result.text))
+_LE('Start command failed. Status: %(status)s, '
+'response: %(response)s'),
+{'status': result.status_code, 'response': result.text})
 raise ex.HadoopProvisionError(
-'Start of Hadoop services failed.')
+_('Start of Hadoop services failed.'))
 def _exec_ambari_command(self, ambari_info, body, cmd_uri):
@@ -405,18 +415,19 @@ class AmbariClient():
 success = self._wait_for_async_request(href, ambari_info)
 if success:
 LOG.info(
-"Successfully changed state of Hadoop components ")
+_LI("Successfully changed state of Hadoop components "))
 else:
-LOG.critical('Failed to change state of Hadoop '
-'components')
+LOG.critical(_LC('Failed to change state of Hadoop '
+'components'))
 raise ex.HadoopProvisionError(
-'Failed to change state of Hadoop components')
+_('Failed to change state of Hadoop components'))
 else:
 LOG.error(
-'Command failed. Status: {0}, response: {1}'.
-format(result.status_code, result.text))
-raise ex.HadoopProvisionError('Hadoop/Ambari command failed.')
+_LE('Command failed. Status: %(status)s, response: '
+'%(response)s'),
+{'status': result.status_code, 'response': result.text})
+raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
 def _get_host_list(self, servers):
 host_list = [server.instance.fqdn().lower() for server in servers]
@@ -432,9 +443,10 @@ class AmbariClient():
 servers, cluster_spec)
 def _install_components(self, ambari_info, auth, cluster_name, servers):
-LOG.info('Starting Hadoop components while scaling up')
-LOG.info('Cluster name {0}, Ambari server ip {1}'
-.format(cluster_name, ambari_info.get_address()))
+LOG.info(_LI('Starting Hadoop components while scaling up'))
+LOG.info(_LI('Cluster name %(cluster_name)s, Ambari server ip %(ip)s'),
+{'cluster_name': cluster_name,
+'ip': ambari_info.get_address()})
 # query for the host components on the given hosts that are in the
 # INIT state
 # TODO(jspeidel): provide request context
@@ -482,13 +494,13 @@ class AmbariClient():
 self._exec_ambari_command(ambari_info, body, start_uri)
 else:
 raise ex.HadoopProvisionError(
-'Unable to determine installed service '
+_('Unable to determine installed service '
 'components in scaled instances. status'
-' code returned = {0}'.format(result.status))
+' code returned = {0}').format(result.status))
 def wait_for_host_registrations(self, num_hosts, ambari_info):
 LOG.info(
-'Waiting for all Ambari agents to register with server ...')
+_LI('Waiting for all Ambari agents to register with server ...'))
 url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
 result = None
@@ -501,14 +513,16 @@ class AmbariClient():
 result = self._get(url, ambari_info)
 json_result = json.loads(result.text)
-LOG.info('Registered Hosts: {0} of {1}'.format(
-len(json_result['items']), num_hosts))
+LOG.info(_LI('Registered Hosts: %(current_number)s of '
+'%(final_number)s'),
+{'current_number': len(json_result['items']),
+'final_number': num_hosts})
 for hosts in json_result['items']:
 LOG.debug('Registered Host: {0}'.format(
 hosts['Hosts']['host_name']))
 except requests.ConnectionError:
 # TODO(jspeidel): max wait time
-LOG.info('Waiting to connect to ambari server ...')
+LOG.info(_LI('Waiting to connect to ambari server ...'))
 def update_ambari_admin_user(self, password, ambari_info):
 old_pwd = ambari_info.password
@@ -520,8 +534,8 @@ class AmbariClient():
 result = self._put(user_url, ambari_info, data=update_body)
 if result.status_code != 200:
-raise ex.HadoopProvisionError('Unable to update Ambari admin user'
-' credentials: {0}'.format(
+raise ex.HadoopProvisionError(_('Unable to update Ambari admin '
+'user credentials: {0}').format(
 result.text))
 def add_ambari_user(self, user, ambari_info):
@@ -536,7 +550,7 @@ class AmbariClient():
 if result.status_code != 201:
 raise ex.HadoopProvisionError(
-'Unable to create Ambari user: {0}'.format(result.text))
+_('Unable to create Ambari user: {0}').format(result.text))
 def delete_ambari_user(self, user_name, ambari_info):
 user_url = 'http://{0}/api/v1/users/{1}'.format(
@@ -546,8 +560,9 @@ class AmbariClient():
 if result.status_code != 200:
 raise ex.HadoopProvisionError(
-'Unable to delete Ambari user: {0}'
-' : {1}'.format(user_name, result.text))
+_('Unable to delete Ambari user: %(user_name)s'
+' : %(text)s') %
+{'user_name': user_name, 'text': result.text})
 def configure_scaled_cluster_instances(self, name, cluster_spec,
 num_hosts, ambari_info):
@@ -570,9 +585,9 @@ class AmbariClient():
 def decommission_cluster_instances(self, cluster, clusterspec, instances,
 ambari_info):
-raise exc.InvalidException('The HDP plugin does not support '
+raise exc.InvalidException(_('The HDP plugin does not support '
 'the decommissioning of nodes '
-'for HDP version 1.3.2')
+'for HDP version 1.3.2'))
 def provision_cluster(self, cluster_spec, servers, ambari_info, name):
 self._add_cluster(ambari_info, name)

sahara/plugins/hdp/versions/version_2_0_6/services.py

@@ -19,6 +19,9 @@ from oslo.config import cfg
 import six
 from sahara import exceptions as e
+from sahara.i18n import _
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
@@ -568,8 +571,8 @@ class HBaseService(Service):
 configurations['global']['hbase_hdfs_root_dir'] = match.group(3)
 else:
 raise e.InvalidDataException(
-"Invalid value for property 'hbase-site/hbase.rootdir' : %s" %
-user_input.value)
+_("Invalid value for property 'hbase-site/hbase.rootdir' : %s")
+% user_input.value)
 def finalize_configuration(self, cluster_spec):
 nn_servers = cluster_spec.determine_component_hosts('NAMENODE')
@@ -910,16 +913,16 @@ class HueService(Service):
 def _create_hue_property_tree(cluster_spec):
 config_name = 'hue-ini'
-LOG.info('Creating Hue ini property tree from configuration named '
-'{0}'.format(config_name))
+LOG.info(_LI('Creating Hue ini property tree from configuration named '
+'{0}').format(config_name))
 hue_ini_property_tree = {'sections': {}, 'properties': {}}
 config = cluster_spec.configurations[config_name]
 if config is None:
-LOG.warning('Missing configuration named {0}, aborting Hue ini '
-'file creation'.format(config_name))
+LOG.warning(_LW('Missing configuration named {0}, aborting Hue ini'
+' file creation').format(config_name))
 else:
 # replace values in hue-ini configuration
 subs = {}
@@ -1010,18 +1013,19 @@ class HueService(Service):
 @staticmethod
 def _merge_configurations(cluster_spec, src_config_name, dst_config_name):
-LOG.info('Merging configuration properties: {0} -> {1}'
-.format(src_config_name, dst_config_name))
+LOG.info(_LI('Merging configuration properties: %(source)s -> '
+'%(destination)s'),
+{'source': src_config_name, 'destination': dst_config_name})
 src_config = cluster_spec.configurations[src_config_name]
 dst_config = cluster_spec.configurations[dst_config_name]
 if src_config is None:
-LOG.warning('Missing source configuration property set, aborting '
-'merge: {0}'.format(src_config_name))
+LOG.warning(_LW('Missing source configuration property set, '
+'aborting merge: {0}').format(src_config_name))
 elif dst_config is None:
-LOG.warning('Missing destination configuration property set, '
-'aborting merge: {0}'.format(dst_config_name))
+LOG.warning(_LW('Missing destination configuration property set, '
+'aborting merge: {0}').format(dst_config_name))
 else:
 for property_name, property_value in six.iteritems(src_config):
 if property_name in dst_config:
@@ -1031,14 +1035,16 @@ class HueService(Service):
 src_config_name,
 property_name))
 else:
-LOG.warning('Overwriting existing configuration '
-'property in {0} from {1} for Hue: {2} '
-'[{3} -> {4}]'
-.format(dst_config_name,
-src_config_name,
-property_name,
-dst_config[property_name],
-src_config[property_name]))
+LOG.warning(_LW('Overwriting existing configuration '
+'property in %(dst_config_name)s from '
+'%(src_config_name)s for Hue: '
+'%(property_name)s '
+'[%(dst_config)s -> %(src_config)s]'),
+{'dst_config_name': dst_config_name,
+'src_config_name': src_config_name,
+'property_name': property_name,
+'dst_config': dst_config[property_name],
+'src_config': src_config[property_name]})
 else:
 LOG.debug('Adding Hue configuration property to {0} from '
 '{1}: {2}'.format(dst_config_name,
@@ -1051,19 +1057,19 @@ class HueService(Service):
 def _handle_pre_service_start(instance, cluster_spec, hue_ini,
 create_user):
 with instance.remote() as r:
-LOG.info('Installing Hue on {0}'
+LOG.info(_LI('Installing Hue on {0}')
 .format(instance.fqdn()))
 r.execute_command('yum -y install hue',
 run_as_root=True)
-LOG.info('Setting Hue configuration on {0}'
+LOG.info(_LI('Setting Hue configuration on {0}')
 .format(instance.fqdn()))
 r.write_file_to('/etc/hue/conf/hue.ini',
 hue_ini,
 True)
-LOG.info('Uninstalling Shell, if it is installed '
-'on {0}'.format(instance.fqdn()))
+LOG.info(_LI('Uninstalling Shell, if it is installed '
+'on {0}').format(instance.fqdn()))
 r.execute_command(
 '/usr/lib/hue/build/env/bin/python '
 '/usr/lib/hue/tools/app_reg/app_reg.py '
@@ -1071,12 +1077,12 @@ class HueService(Service):
 run_as_root=True)
 if create_user:
-LOG.info('Creating initial Hue user on {0}'
+LOG.info(_LI('Creating initial Hue user on {0}')
 .format(instance.fqdn()))
 r.execute_command('/usr/lib/hue/build/env/bin/hue '
 'create_sandbox_user', run_as_root=True)
-LOG.info('(Re)starting Hue on {0}'
+LOG.info(_LI('(Re)starting Hue on {0}')
 .format(instance.fqdn()))
 java_home = HueService._get_java_home(cluster_spec)
@@ -1167,15 +1173,15 @@ class HueService(Service):
 components = hue_ng.components
 if 'HDFS_CLIENT' not in components:
-LOG.info('Missing HDFS client from Hue node... adding it '
-'since it is required for Hue')
+LOG.info(_LI('Missing HDFS client from Hue node... adding '
+'it since it is required for Hue'))
 components.append('HDFS_CLIENT')
 if cluster_spec.get_deployed_node_group_count('HIVE_SERVER'):
 if 'HIVE_CLIENT' not in components:
-LOG.info('Missing HIVE client from Hue node... adding '
-'it since it is required for Beeswax and '
-'HCatalog')
+LOG.info(_LI('Missing HIVE client from Hue node... '
+'adding it since it is required for '
+'Beeswax and HCatalog'))
 components.append('HIVE_CLIENT')
 def pre_service_start(self, cluster_spec, ambari_info, started_services):

sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py

@@ -22,7 +22,11 @@ import requests
 from sahara import context
 from sahara import exceptions as exc
+from sahara.i18n import _
+from sahara.i18n import _LC
+from sahara.i18n import _LE
 from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.hdp import clusterspec as cs
 from sahara.plugins.hdp import configprovider as cfgprov
@@ -151,9 +155,9 @@ class AmbariClient():
 self.handler.get_version() + '"}}')
 if result.status_code != 201:
-LOG.error('Create cluster command failed. %s' % result.text)
+LOG.error(_LE('Create cluster command failed. %s'), result.text)
 raise ex.HadoopProvisionError(
-'Failed to add cluster: %s' % result.text)
+_('Failed to add cluster: %s') % result.text)
 def _add_configurations_to_cluster(
 self, cluster_spec, ambari_info, name):
@@ -199,10 +203,10 @@ class AmbariClient():
 result = self._put(config_url, ambari_info, data=json.dumps(body))
 if result.status_code != 200:
 LOG.error(
-'Set configuration command failed. {0}'.format(
+_LE('Set configuration command failed. {0}').format(
 result.text))
 raise ex.HadoopProvisionError(
-'Failed to set configurations on cluster: %s'
+_('Failed to set configurations on cluster: %s')
 % result.text)
 def _add_services_to_cluster(self, cluster_spec, ambari_info, name):
@@ -216,10 +220,11 @@ class AmbariClient():
 ambari_info)
 if result.status_code not in [201, 409]:
 LOG.error(
-'Create service command failed. {0}'.format(
+_LE('Create service command failed. {0}').format(
 result.text))
 raise ex.HadoopProvisionError(
-'Failed to add services to cluster: %s' % result.text)
+_('Failed to add services to cluster: %s')
+% result.text)
 def _add_components_to_services(self, cluster_spec, ambari_info, name):
 add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{'
@@ -234,10 +239,10 @@ class AmbariClient():
 ambari_info)
 if result.status_code not in [201, 409]:
 LOG.error(
-'Create component command failed. {0}'.format(
+_LE('Create component command failed. {0}').format(
 result.text))
 raise ex.HadoopProvisionError(
-'Failed to add components to services: %s'
+_('Failed to add components to services: %s')
 % result.text)
 def _add_hosts_and_components(
@@ -253,9 +258,9 @@ class AmbariClient():
 ambari_info)
 if result.status_code != 201:
 LOG.error(
-'Create host command failed. {0}'.format(result.text))
+_LE('Create host command failed. {0}').format(result.text))
 raise ex.HadoopProvisionError(
-'Failed to add host: %s' % result.text)
+_('Failed to add host: %s') % result.text)
 node_group_name = host.node_group.name
 # TODO(jspeidel): ensure that node group exists
@@ -271,13 +276,14 @@ class AmbariClient():
 ambari_info)
 if result.status_code != 201:
 LOG.error(
-'Create host_component command failed. %s' %
+_LE('Create host_component command failed. %s'),
 result.text)
 raise ex.HadoopProvisionError(
-'Failed to add host component: %s' % result.text)
+_('Failed to add host component: %s')
+% result.text)
 def _install_services(self, cluster_name, ambari_info):
-LOG.info('Installing required Hadoop services ...')
+LOG.info(_LI('Installing required Hadoop services ...'))
 ambari_address = ambari_info.get_address()
 install_url = ('http://{0}/api/v1/clusters/{'
@@ -295,17 +301,17 @@ class AmbariClient():
 ambari_info, cluster_name, request_id),
 ambari_info)
 if success:
-LOG.info("Install of Hadoop stack successful.")
+LOG.info(_LI("Install of Hadoop stack successful."))
 self._finalize_ambari_state(ambari_info)
 else:
-LOG.critical('Install command failed.')
+LOG.critical(_LC('Install command failed.'))
 raise ex.HadoopProvisionError(
-'Installation of Hadoop stack failed.')
+_('Installation of Hadoop stack failed.'))
 elif result.status_code != 200:
 LOG.error(
-'Install command failed. {0}'.format(result.text))
+_LE('Install command failed. {0}').format(result.text))
 raise ex.HadoopProvisionError(
-'Installation of Hadoop stack failed.')
+_('Installation of Hadoop stack failed.'))
 def _get_async_request_uri(self, ambari_info, cluster_name, request_id):
 return ('http://{0}/api/v1/clusters/{1}/requests/{'
@@ -338,7 +344,7 @@ class AmbariClient():
 return started
 def _finalize_ambari_state(self, ambari_info):
-LOG.info('Finalizing Ambari cluster state.')
+LOG.info(_LI('Finalizing Ambari cluster state.'))
 persist_state_uri = 'http://{0}/api/v1/persist'.format(
 ambari_info.get_address())
@@ -349,14 +355,17 @@ class AmbariClient():
 result = self._post(persist_state_uri, ambari_info, data=persist_data)
 if result.status_code != 201 and result.status_code != 202:
-LOG.warning('Finalizing of Ambari cluster state failed. {0}'.
+LOG.warning(_LW('Finalizing of Ambari cluster state failed. {0}').
 format(result.text))
-raise ex.HadoopProvisionError('Unable to finalize Ambari state.')
+raise ex.HadoopProvisionError(
+_('Unable to finalize Ambari state.'))
 def start_services(self, cluster_name, cluster_spec, ambari_info):
-LOG.info('Starting Hadoop services ...')
-LOG.info('Cluster name: {0}, Ambari server address: {1}'
-.format(cluster_name, ambari_info.get_address()))
+LOG.info(_LI('Starting Hadoop services ...'))
+LOG.info(_LI('Cluster name: %(cluster_name)s, Ambari server address:'
+' %(server_address)s'),
+{'cluster_name': cluster_name,
+'server_address': ambari_info.get_address()})
 start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/'
 'state=INSTALLED'.format(
 ambari_info.get_address(), cluster_name))
@@ -374,19 +383,20 @@ class AmbariClient():
 request_id), ambari_info)
 if success:
 LOG.info(
-"Successfully started Hadoop cluster '{0}'.".format(
+_LI("Successfully started Hadoop cluster '{0}'.").format(
 cluster_name))
 else:
-LOG.critical('Failed to start Hadoop cluster.')
+LOG.critical(_LC('Failed to start Hadoop cluster.'))
 raise ex.HadoopProvisionError(
-'Start of Hadoop services failed.')
+_('Start of Hadoop services failed.'))
 elif result.status_code != 200:
 LOG.error(
-'Start command failed. Status: {0}, response: {1}'.
-format(result.status_code, result.text))
+_LE('Start command failed. Status: %(status)s, response: '
+'%(response)s'),
+{'status': result.status_code, 'result': result.text})
 raise ex.HadoopProvisionError(
-'Start of Hadoop services failed.')
+_('Start of Hadoop services failed.'))
 def _exec_ambari_command(self, ambari_info, body, cmd_uri):
@@ -400,18 +410,19 @@ class AmbariClient():
 success = self._wait_for_async_request(href, ambari_info)
 if success:
 LOG.info(
-"Successfully changed state of Hadoop components ")
+_LI("Successfully changed state of Hadoop components "))
 else:
-LOG.critical('Failed to change state of Hadoop '
-'components')
+LOG.critical(_LC('Failed to change state of Hadoop '
+'components'))
 raise ex.HadoopProvisionError(
-'Failed to change state of Hadoop components')
+_('Failed to change state of Hadoop components'))
 else:
 LOG.error(
-'Command failed. Status: {0}, response: {1}'.
-format(result.status_code, result.text))
-raise ex.HadoopProvisionError('Hadoop/Ambari command failed.')
+_LE('Command failed. Status: %(status)s, response: '
+'%(response)s'),
+{'status': result.status_code, 'result': result.text})
+raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.'))
 def _get_host_list(self, servers):
 host_list = [server.instance.fqdn().lower() for server in servers]
@@ -427,9 +438,10 @@ class AmbariClient():
 servers, cluster_spec)
 def _install_components(self, ambari_info, auth, cluster_name, servers):
-LOG.info('Starting Hadoop components while scaling up')
-LOG.info('Cluster name {0}, Ambari server ip {1}'
-.format(cluster_name, ambari_info.get_address()))
+LOG.info(_LI('Starting Hadoop components while scaling up'))
+LOG.info(_LI('Cluster name %(cluster_name)s, Ambari server ip %(ip)s'),
+{'cluster_name': cluster_name,
+'ip': ambari_info.get_address()})
 # query for the host components on the given hosts that are in the
 # INIT state
 # TODO(jspeidel): provide request context
@@ -477,13 +489,13 @@ class AmbariClient():
 self._exec_ambari_command(ambari_info, body, start_uri)
 else:
 raise ex.HadoopProvisionError(
-'Unable to determine installed service '
+_('Unable to determine installed service '
 'components in scaled instances. status'
-' code returned = {0}'.format(result.status))
+' code returned = {0}').format(result.status))
 def wait_for_host_registrations(self, num_hosts, ambari_info):
 LOG.info(
-'Waiting for all Ambari agents to register with server ...')
+_LI('Waiting for all Ambari agents to register with server ...'))
 url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
 result = None
@@ -496,14 +508,16 @@ class AmbariClient():
 result = self._get(url, ambari_info)
 json_result = json.loads(result.text)
-LOG.info('Registered Hosts: {0} of {1}'.format(
-len(json_result['items']), num_hosts))
+LOG.info(_LI('Registered Hosts: %(current_number)s '
+'of %(final_number)s'),
+{'current_number': len(json_result['items']),
+'final_number': num_hosts})
 for hosts in json_result['items']:
 LOG.debug('Registered Host: {0}'.format(
 hosts['Hosts']['host_name']))
 except requests.ConnectionError:
 # TODO(jspeidel): max wait time
-LOG.info('Waiting to connect to ambari server ...')
+LOG.info(_LI('Waiting to connect to ambari server ...'))
 def update_ambari_admin_user(self, password, ambari_info):
 old_pwd = ambari_info.password
@@ -515,8 +529,8 @@ class AmbariClient():
 result = self._put(user_url, ambari_info, data=update_body)
 if result.status_code != 200:
-raise ex.HadoopProvisionError('Unable to update Ambari admin user'
-' credentials: {0}'.format(
+raise ex.HadoopProvisionError(_('Unable to update Ambari admin '
+'user credentials: {0}').format(
 result.text))
 def add_ambari_user(self, user, ambari_info):
@@ -531,7 +545,7 @@ class AmbariClient():
 if result.status_code != 201:
 raise ex.HadoopProvisionError(
-'Unable to create Ambari user: {0}'.format(result.text))
+_('Unable to create Ambari user: {0}').format(result.text))
 def delete_ambari_user(self, user_name, ambari_info):
 user_url = 'http://{0}/api/v1/users/{1}'.format(
@@ -541,8 +555,9 @@ class AmbariClient():
 if result.status_code != 200:
 raise ex.HadoopProvisionError(
-'Unable to delete Ambari user: {0}'
-' : {1}'.format(user_name, result.text))
+_('Unable to delete Ambari user: %(user_name)s'
+' : %(text)s') %
+{'user_name': user_name, 'text': result.text})
 def configure_scaled_cluster_instances(self, name, cluster_spec,
 num_hosts, ambari_info):
@@ -609,17 +624,17 @@ class AmbariClient():
 # ask Ambari to decommission the datanodes
 result = self._post(request_uri, ambari_info, request_body)
 if result.status_code != 202:
-LOG.error('AmbariClient: error while making decommision post ' +
-'request. Error is = ' + result.text)
+LOG.error(_LE('AmbariClient: error while making decommision post '
+'request. Error is = %s'), result.text)
 raise exc.InvalidException(
-'An error occurred while trying to ' +
-'decommission the DataNode instances that are ' +
-'being shut down. ' +
-'Please consult the Ambari server logs on the ' +
-'master node for ' +
-'more information about the failure.')
+_('An error occurred while trying to '
+'decommission the DataNode instances that are '
+'being shut down. '
+'Please consult the Ambari server logs on the '
+'master node for '
+'more information about the failure.'))
 else:
-LOG.info('AmbariClient: decommission post request succeeded!')
+LOG.info(_LI('AmbariClient: decommission post request succeeded!'))
 status_template = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/'
 'host_components/{3}')
@@ -637,17 +652,17 @@ class AmbariClient():
 count = 0
 while count < 100 and len(hosts_to_decommission) > 0:
-LOG.info('AmbariClient: number of hosts waiting for ' +
-'decommisioning to complete = ' +
+LOG.info(_LI('AmbariClient: number of hosts waiting for '
+'decommisioning to complete = %s'),
 str(len(hosts_to_decommission)))
 result = self._get(status_request, ambari_info)
 if result.status_code != 200:
-LOG.error('AmbariClient: error in making decomission status ' +
-'request, error = ' + result.text)
+LOG.error(_LE('AmbariClient: error in making decomission '
+'status request, error = %s'), result.text)
 else:
-LOG.info('AmbariClient: decommission status request ok, ' +
-'result = ' + result.text)
+LOG.info(_LI('AmbariClient: decommission status request ok, '
+'result = %s'), result.text)
 json_result = json.loads(result.text)
 live_nodes = (
 json_result['metrics']['dfs']['namenode']['LiveNodes'])
@@ -656,22 +671,24 @@
 for node in json_result_nodes.keys():
 admin_state = json_result_nodes[node]['adminState']
 if admin_state == 'Decommissioned':
-LOG.info('AmbariClient: node = ' + node +
-' is now in adminState = ' +
-admin_state)
+LOG.info(_LI('AmbariClient: node = %(node)s is '
+'now in adminState = %(admin_state)s'),
+{'node': node,
+'admin_state': admin_state})
 # remove from list, to track which nodes
 # are now in Decommissioned state
 hosts_to_decommission.remove(node)
-LOG.info('AmbariClient: sleeping for 5 seconds')
+LOG.info(_LI('AmbariClient: sleeping for 5 seconds'))
context.sleep(5) context.sleep(5)
# increment loop counter # increment loop counter
count += 1 count += 1
if len(hosts_to_decommission) > 0: if len(hosts_to_decommission) > 0:
LOG.error('AmbariClient: decommissioning process timed-out ' + LOG.error(_LE('AmbariClient: decommissioning process timed-out '
'waiting for nodes to enter "Decommissioned" status.') 'waiting for nodes to enter "Decommissioned" '
'status.'))
def provision_cluster(self, cluster_spec, servers, ambari_info, name): def provision_cluster(self, cluster_spec, servers, ambari_info, name):
self._add_cluster(ambari_info, name) self._add_cluster(ambari_info, name)
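The pattern applied throughout these hunks is the same: the literal message is wrapped in a lazy-translation marker (_LI/_LE for log levels, _ for exceptions) and the values are handed to the logger separately instead of being pre-formatted with format() or string concatenation. A minimal sketch of that logging convention, assuming an illustrative helper name (report_registered is not part of this commit):

    from sahara.i18n import _LI
    from sahara.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def report_registered(current, expected):
        # The mapping is passed as a separate argument so interpolation is
        # deferred to the logger, and the named placeholders let translators
        # reorder the values in their locale.
        LOG.info(_LI('Registered Hosts: %(current_number)s '
                     'of %(final_number)s'),
                 {'current_number': current, 'final_number': expected})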

View File

@@ -15,6 +15,7 @@

 from sahara import exceptions as ex
+from sahara.i18n import _
 from sahara.plugins import base as plugins_base
 from sahara.utils import resources
@@ -153,13 +154,17 @@ class ProvisioningPluginBase(plugins_base.PluginInterface):
             confs = config_objs_map.get(applicable_target)
             if not confs:
                 raise ex.ConfigurationError(
-                    "Can't find applicable target '%s' for '%s'"
-                    % (applicable_target, config_name))
+                    _("Can't find applicable target "
+                      "'%(applicable_target)s' for '%(config_name)s'")
+                    % {"applicable_target": applicable_target,
+                       "config_name": config_name})
             conf = confs.get(config_name)
             if not conf:
                 raise ex.ConfigurationError(
-                    "Can't find config '%s' in '%s'"
-                    % (config_name, applicable_target))
+                    _("Can't find config '%(config_name)s' "
+                      "in '%(applicable_target)s'")
+                    % {"config_name": config_name,
+                       "applicable_target": applicable_target})
             result.append(UserInput(
                 conf, configs[applicable_target][config_name]))
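Exception messages follow a slightly different rule from log messages: the string is translated with _() and then interpolated eagerly with %, still through a named-placeholder mapping whenever more than one value is substituted. A hedged sketch of the resulting shape (the wrapper function is illustrative, not from the commit):

    from sahara import exceptions as ex
    from sahara.i18n import _

    def fail_missing_target(applicable_target, config_name):
        # _() translates immediately for user-facing errors; the dict keeps
        # the placeholders named so translations can reorder them safely.
        raise ex.ConfigurationError(
            _("Can't find applicable target '%(applicable_target)s' "
              "for '%(config_name)s'")
            % {"applicable_target": applicable_target,
               "config_name": config_name})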

View File

@@ -16,6 +16,8 @@
 from oslo.config import cfg

 from sahara import conductor as c
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import utils
 from sahara.plugins import provisioning as p
@@ -202,8 +204,9 @@ def get_config_value(service, name, cluster=None):
         if configs.applicable_target == service and configs.name == name:
             return configs.default_value

-    raise RuntimeError("Unable to get parameter '%s' from service %s",
-                       name, service)
+    raise RuntimeError(_("Unable to get parameter '%(param_name)s' from "
+                         "service %(service)s"),
+                       {'param_name': name, 'service': service})


 def generate_cfg_from_general(cfg, configs, general_config,
@@ -215,7 +218,7 @@ def generate_cfg_from_general(cfg, configs, general_config,
         for name, value in configs['general'].items():
             if value:
                 cfg = _set_config(cfg, general_config, name)
-                LOG.info("Applying config: %s" % name)
+                LOG.info(_LI("Applying config: %s"), name)
     else:
         cfg = _set_config(cfg, general_config)
     return cfg

View File

@@ -19,6 +19,8 @@ from oslo.config import cfg

 from sahara import conductor
 from sahara import context
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
@@ -48,8 +50,7 @@ class SparkProvider(p.ProvisioningPluginBase):
         return "Apache Spark"

     def get_description(self):
-        return (
-            "This plugin provides an ability to launch Spark on Hadoop "
-            "CDH cluster without any management consoles.")
+        return _("This plugin provides an ability to launch Spark on Hadoop "
+                 "CDH cluster without any management consoles.")

     def get_versions(self):
@@ -70,7 +71,7 @@ class SparkProvider(p.ProvisioningPluginBase):
         dn_count = sum([ng.count for ng
                         in utils.get_node_groups(cluster, "datanode")])
         if dn_count < 1:
-            raise ex.InvalidComponentCountException("datanode", "1 or more",
+            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                     nn_count)

         # validate Spark Master Node and Spark Slaves
@@ -84,7 +85,8 @@ class SparkProvider(p.ProvisioningPluginBase):
                         in utils.get_node_groups(cluster, "slave")])
         if sl_count < 1:
-            raise ex.InvalidComponentCountException("Spark slave", "1 or more",
+            raise ex.InvalidComponentCountException("Spark slave",
+                                                    _("1 or more"),
                                                     sl_count)

     def update_infra(self, cluster):
@@ -106,7 +108,7 @@ class SparkProvider(p.ProvisioningPluginBase):
         # start the data nodes
         self._start_slave_datanode_processes(dn_instances)

-        LOG.info("Hadoop services in cluster %s have been started" %
-                 cluster.name)
+        LOG.info(_LI("Hadoop services in cluster %s have been started"),
+                 cluster.name)

         with remote.get_remote(nn_instance) as r:
@@ -118,10 +120,11 @@ class SparkProvider(p.ProvisioningPluginBase):
         if sm_instance:
             with remote.get_remote(sm_instance) as r:
                 run.start_spark_master(r, self._spark_home(cluster))
-                LOG.info("Spark service at '%s' has been started",
+                LOG.info(_LI("Spark service at '%s' has been started"),
                          sm_instance.hostname())

-        LOG.info('Cluster %s has been started successfully' % cluster.name)
+        LOG.info(_LI('Cluster %s has been started successfully'),
+                 cluster.name)
         self._set_cluster_info(cluster)

     def _spark_home(self, cluster):
@@ -373,7 +376,7 @@ class SparkProvider(p.ProvisioningPluginBase):
             self._start_slave_datanode_processes(instances)

         run.start_spark_master(r_master, self._spark_home(cluster))
-        LOG.info("Spark master service at '%s' has been restarted",
+        LOG.info(_LI("Spark master service at '%s' has been restarted"),
                  master.hostname())

     def _get_scalable_processes(self):
@@ -386,8 +389,8 @@ class SparkProvider(p.ProvisioningPluginBase):
             ng = ug.get_by_id(cluster.node_groups, ng_id)
             if not set(ng.node_processes).issubset(scalable_processes):
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Spark plugin cannot scale nodegroup"
-                             " with processes: " +
-                             ' '.join(ng.node_processes))
+                    ng.name, _("Spark plugin cannot scale nodegroup"
+                               " with processes: %s") %
+                    ' '.join(ng.node_processes))

     def _validate_existing_ng_scaling(self, cluster, existing):
@@ -400,8 +403,8 @@ class SparkProvider(p.ProvisioningPluginBase):
                 dn_to_delete += ng.count - existing[ng.id]
             if not set(ng.node_processes).issubset(scalable_processes):
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Spark plugin cannot scale nodegroup"
-                             " with processes: " +
-                             ' '.join(ng.node_processes))
+                    ng.name, _("Spark plugin cannot scale nodegroup"
+                               " with processes: %s") %
+                    ' '.join(ng.node_processes))

         dn_amount = len(utils.get_instances(cluster, "datanode"))
@@ -410,9 +413,9 @@ class SparkProvider(p.ProvisioningPluginBase):
         if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
             raise ex.ClusterCannotBeScaled(
-                cluster.name, "Spark plugin cannot shrink cluster because "
-                              "there would be not enough nodes for HDFS "
-                              "replicas (replication factor is %s)" %
-                              rep_factor)
+                cluster.name, _("Spark plugin cannot shrink cluster because "
+                                "there would be not enough nodes for HDFS "
+                                "replicas (replication factor is %s)") %
+                rep_factor)

     def get_edp_engine(self, cluster, job_type, default_engines):
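Note that even short argument strings such as "1 or more" get wrapped, because InvalidComponentCountException embeds them in a user-facing message. A minimal sketch of how such a check reads after this change (the wrapper function and the count variable are illustrative):

    from sahara.i18n import _
    from sahara.plugins.general import exceptions as ex

    def check_datanodes(dn_count):
        # The expected-count argument is human-readable text, so it is also
        # wrapped with _(); the component name stays untranslated.
        if dn_count < 1:
            raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                    dn_count)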

View File

@@ -19,6 +19,7 @@ from oslo.utils import timeutils
 import six

 from sahara import context
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
 from sahara.plugins.spark import config_helper as c_helper
@@ -88,8 +89,10 @@ def decommission_dn(nn, inst_to_be_deleted, survived_inst):
     if not all_found:
         ex.DecommissionError(
-            "Cannot finish decommission of cluster %s in %d seconds" %
-            (nn.node_group.cluster, timeout))
+            _("Cannot finish decommission of cluster %(cluster)s in "
+              "%(seconds)d seconds") %
+            {"cluster": nn.node_group.cluster,
+             "seconds": timeout})


 def parse_dfs_report(cmd_output):

View File

@@ -15,6 +15,7 @@

 import six

+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
 from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper
@@ -296,8 +297,8 @@ def _merge_configs(a, b):

 def configure_topology_data(pctx, cluster):
     if c_helper.is_data_locality_enabled(pctx, cluster):
-        LOG.info("Node group awareness is not implemented in YARN yet "
-                 "so enable_hypervisor_awareness set to False explicitly")
+        LOG.info(_LI("Node group awareness is not implemented in YARN yet "
+                     "so enable_hypervisor_awareness set to False explicitly"))
         tpl_map = th.generate_topology_map(cluster, is_node_awareness=False)
         topology_data = "\n".join(
             [k + " " + v for k, v in tpl_map.items()]) + "\n"

View File

@@ -16,6 +16,7 @@
 from oslo.config import cfg

 from sahara import exceptions as ex
+from sahara.i18n import _
 from sahara.plugins import provisioning as p
 from sahara.utils import types
@@ -157,7 +158,8 @@ def get_config_value(pctx, service, name, cluster=None):
             return c.default_value

     raise ex.NotFoundException(
-        name, "Unable to get parameter '%s' from service %s" % (name, service))
+        name, _("Unable to get parameter '%(name)s' from service %(service)s")
+        % {"name": name, "service": service})


 def is_swift_enabled(pctx, cluster):

View File

@@ -14,6 +14,8 @@
 # limitations under the License.

 from sahara import context
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper
@@ -50,7 +52,7 @@ def _start_processes(instance, processes):
                 'sudo su - -c "yarn-daemon.sh start %s" hadoop' % process)
         else:
             raise ex.HadoopProvisionError(
-                "Process %s is not supported" % process)
+                _("Process %s is not supported") % process)


 def start_hadoop_process(instance, process):
@@ -142,12 +144,12 @@ def await_datanodes(cluster):
     if datanodes_count < 1:
         return

-    LOG.info("Waiting %s datanodes to start up" % datanodes_count)
+    LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count)
     with vu.get_namenode(cluster).remote() as r:
         while True:
             if _check_datanodes_count(r, datanodes_count):
                 LOG.info(
-                    'Datanodes on cluster %s has been started' %
-                    cluster.name)
+                    _LI('Datanodes on cluster %s has been started'),
+                    cluster.name)
                 return
@@ -155,8 +157,8 @@ def await_datanodes(cluster):
             if not g.check_cluster_exists(cluster):
                 LOG.info(
-                    'Stop waiting datanodes on cluster %s since it has '
-                    'been deleted' % cluster.name)
+                    _LI('Stop waiting datanodes on cluster %s since it has '
+                        'been deleted'), cluster.name)
                 return

View File

@@ -16,6 +16,7 @@
 from oslo.utils import timeutils

 from sahara import context
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins.vanilla.hadoop2 import config
@@ -122,8 +123,9 @@ def _check_decommission(cluster, instances, check_func, timeout):
             context.sleep(5)
     else:
         ex.DecommissionError(
-            "Cannot finish decommission of cluster %s in %d seconds" %
-            (cluster, timeout))
+            _("Cannot finish decommission of cluster %(cluster)s in "
+              "%(seconds)d seconds") %
+            {"cluster": cluster, "seconds": timeout})


 def _check_nodemanagers_decommission(cluster, instances):

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins.vanilla.hadoop2 import config_helper as cu
@@ -67,8 +68,9 @@ def validate_cluster_creating(pctx, cluster):
     rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
     if dn_count < rep_factor:
         raise ex.InvalidComponentCountException(
-            'datanode', rep_factor, dn_count, 'Number of datanodes must be not'
-            ' less than dfs.replication.')
+            'datanode', rep_factor, dn_count, _('Number of datanodes must be '
+                                                'not less than '
+                                                'dfs.replication.'))


 def validate_additional_ng_scaling(cluster, additional):
@@ -78,12 +80,12 @@ def validate_additional_ng_scaling(cluster, additional):
     for ng_id in additional:
         ng = gu.get_by_id(cluster.node_groups, ng_id)
         if not set(ng.node_processes).issubset(scalable_processes):
-            msg = "Vanilla plugin cannot scale nodegroup with processes: %s"
+            msg = _("Vanilla plugin cannot scale nodegroup with processes: %s")
             raise ex.NodeGroupCannotBeScaled(ng.name,
                                              msg % ' '.join(ng.node_processes))

         if not rm and 'nodemanager' in ng.node_processes:
-            msg = ("Vanilla plugin cannot scale node group with processes "
-                   "which have no master-processes run in cluster")
+            msg = _("Vanilla plugin cannot scale node group with processes "
+                    "which have no master-processes run in cluster")
             raise ex.NodeGroupCannotBeScaled(ng.name, msg)
@@ -97,7 +99,7 @@ def validate_existing_ng_scaling(pctx, cluster, existing):
             dn_to_delete += ng.count - existing[ng.id]

         if not set(ng.node_processes).issubset(scalable_processes):
-            msg = ("Vanilla plugin cannot scale nodegroup "
-                   "with processes: %s")
+            msg = _("Vanilla plugin cannot scale nodegroup "
+                    "with processes: %s")
             raise ex.NodeGroupCannotBeScaled(
                 ng.name, msg % ' '.join(ng.node_processes))
@@ -106,8 +108,8 @@ def validate_existing_ng_scaling(pctx, cluster, existing):
     rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
     if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
-        msg = ("Vanilla plugin cannot shrink cluster because it would be not "
-               "enough nodes for replicas (replication factor is %s)")
+        msg = _("Vanilla plugin cannot shrink cluster because it would be "
+                "not enough nodes for replicas (replication factor is %s)")
         raise ex.ClusterCannotBeScaled(
             cluster.name, msg % rep_factor)
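One detail worth noting in these validation hunks: the %s placeholder stays inside the string passed to _(), and the substitution is applied to the translated result afterwards, so the placeholder survives into every catalog entry. A minimal sketch of that ordering (the wrapper function and variable names are illustrative):

    from sahara.i18n import _
    from sahara.plugins.general import exceptions as ex

    def refuse_scaling(ng_name, node_processes):
        # Translate the whole format string first, then interpolate.
        msg = _("Vanilla plugin cannot scale nodegroup with processes: %s")
        raise ex.NodeGroupCannotBeScaled(ng_name,
                                         msg % ' '.join(node_processes))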

View File

@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins import provisioning as p
@@ -25,9 +26,9 @@ class VanillaProvider(p.ProvisioningPluginBase):

     def get_description(self):
         return (
-            "This plugin provides an ability to launch vanilla Apache Hadoop "
-            "cluster without any management consoles. Also it can "
-            "deploy Oozie and Hive")
+            _("This plugin provides an ability to launch vanilla Apache Hadoop"
+              " cluster without any management consoles. Also it can "
+              "deploy Oozie and Hive"))

     def _get_version_handler(self, hadoop_version):
         return self.version_factory.get_version_handler(hadoop_version)

View File

@@ -18,6 +18,9 @@ from oslo.config import cfg

 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import utils
 from sahara.plugins import provisioning as p
@@ -207,8 +210,9 @@ def get_config_value(service, name, cluster=None):
         if configs.applicable_target == service and configs.name == name:
             return configs.default_value

-    raise ex.ConfigurationError("Unable get parameter '%s' from service %s" %
-                                (name, service))
+    raise ex.ConfigurationError(_("Unable get parameter '%(parameter)s' from "
+                                  "service %(service)s")
+                                % {"parameter": name, "service": service})


 def generate_cfg_from_general(cfg, configs, general_config,
@@ -220,7 +224,7 @@ def generate_cfg_from_general(cfg, configs, general_config,
         for name, value in configs['general'].items():
             if value:
                 cfg = _set_config(cfg, general_config, name)
-                LOG.info("Applying config: %s" % name)
+                LOG.info(_LI("Applying config: %s"), name)
     else:
         cfg = _set_config(cfg, general_config)
     return cfg
@@ -356,8 +360,8 @@ def extract_environment_confs(configs):
                 if param_name == cfg_name and param_value is not None:
                     lst.append(cfg_format_str % param_value)
         else:
-            LOG.warn("Plugin received wrong applicable target '%s' in "
-                     "environmental configs" % service)
+            LOG.warn(_LW("Plugin received wrong applicable target '%s' in "
+                         "environmental configs"), service)

     return lst
@@ -377,8 +381,8 @@ def extract_xml_confs(configs):
                 if param_name in names and param_value is not None:
                     lst.append((param_name, param_value))
         else:
-            LOG.warn("Plugin received wrong applicable target '%s' for "
-                     "xml configs" % service)
+            LOG.warn(_LW("Plugin received wrong applicable target '%s' for "
+                         "xml configs"), service)

     return lst

View File

@@ -19,6 +19,7 @@ from oslo.utils import timeutils
 import six

 from sahara import context
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
 from sahara.plugins.vanilla.v1_2_1 import config_helper
@@ -75,8 +76,9 @@ def decommission_dn(nn, inst_to_be_deleted, survived_inst):
     if not all_found:
         ex.DecommissionError(
-            "Cannot finish decommission of cluster %s in %d seconds" %
-            (nn.node_group.cluster, timeout))
+            _("Cannot finish decommission of cluster %(cluster)s in "
+              "%(seconds)d seconds") %
+            {"cluster": nn.node_group.cluster, "seconds": timeout})


 def parse_dfs_report(cmd_output):

View File

@@ -20,6 +20,8 @@ import six

 from sahara import conductor
 from sahara import context
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
@@ -68,14 +70,14 @@ class VersionHandler(avm.AbstractVersionHandler):
                         in utils.get_node_groups(cluster, "jobtracker")])
         if jt_count not in [0, 1]:
-            raise ex.InvalidComponentCountException("jobtracker", '0 or 1',
+            raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
                                                     jt_count)

         oozie_count = sum([ng.count for ng
                            in utils.get_node_groups(cluster, "oozie")])
         if oozie_count not in [0, 1]:
-            raise ex.InvalidComponentCountException("oozie", '0 or 1',
+            raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
                                                     oozie_count)

         hive_count = sum([ng.count for ng
@@ -97,7 +99,7 @@ class VersionHandler(avm.AbstractVersionHandler):
                 "jobtracker", required_by="hive")

         if hive_count not in [0, 1]:
-            raise ex.InvalidComponentCountException("hive", '0 or 1',
+            raise ex.InvalidComponentCountException("hive", _('0 or 1'),
                                                     hive_count)

     def configure_cluster(self, cluster):
@@ -122,7 +124,7 @@ class VersionHandler(avm.AbstractVersionHandler):
         self._await_datanodes(cluster)

-        LOG.info("Hadoop services in cluster %s have been started" %
-                 cluster.name)
+        LOG.info(_LI("Hadoop services in cluster %s have been started"),
+                 cluster.name)

         oozie = vu.get_oozie(cluster)
@@ -133,7 +135,7 @@ class VersionHandler(avm.AbstractVersionHandler):
                 run.oozie_create_db(r)
                 run.oozie_share_lib(r, nn_instance.hostname())
                 run.start_oozie(r)
-                LOG.info("Oozie service at '%s' has been started",
+                LOG.info(_LI("Oozie service at '%s' has been started"),
                          nn_instance.hostname())

         hive_server = vu.get_hiveserver(cluster)
@@ -148,10 +150,11 @@ class VersionHandler(avm.AbstractVersionHandler):
                     run.mysql_start(r, hive_server)
                     run.hive_create_db(r)
                     run.hive_metastore_start(r)
-                    LOG.info("Hive Metastore server at %s has been started",
+                    LOG.info(_LI("Hive Metastore server at %s has been "
+                                 "started"),
                              hive_server.hostname())

-        LOG.info('Cluster %s has been started successfully' % cluster.name)
+        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
         self._set_cluster_info(cluster)

     def _await_datanodes(self, cluster):
@@ -159,12 +162,12 @@ class VersionHandler(avm.AbstractVersionHandler):
         if datanodes_count < 1:
             return

-        LOG.info("Waiting %s datanodes to start up" % datanodes_count)
+        LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count)
         with remote.get_remote(vu.get_namenode(cluster)) as r:
             while True:
                 if run.check_datanodes_count(r, datanodes_count):
                     LOG.info(
-                        'Datanodes on cluster %s has been started' %
-                        cluster.name)
+                        _LI('Datanodes on cluster %s has been started'),
+                        cluster.name)
                     return
@@ -172,8 +175,8 @@ class VersionHandler(avm.AbstractVersionHandler):
                 if not g.check_cluster_exists(cluster):
                     LOG.info(
-                        'Stop waiting datanodes on cluster %s since it has '
-                        'been deleted' % cluster.name)
+                        _LI('Stop waiting datanodes on cluster %s since it has'
+                            ' been deleted'), cluster.name)
                     return

     def _extract_configs_to_extra(self, cluster):
@@ -451,14 +454,14 @@ class VersionHandler(avm.AbstractVersionHandler):
             ng = self._get_by_id(cluster.node_groups, ng_id)
             if not set(ng.node_processes).issubset(scalable_processes):
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Vanilla plugin cannot scale nodegroup"
-                             " with processes: " +
-                             ' '.join(ng.node_processes))
+                    ng.name, _("Vanilla plugin cannot scale nodegroup"
+                               " with processes: %s") %
+                    ' '.join(ng.node_processes))
             if not jt and 'tasktracker' in ng.node_processes:
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Vanilla plugin cannot scale node group with "
-                             "processes which have no master-processes run "
-                             "in cluster")
+                    ng.name, _("Vanilla plugin cannot scale node group with "
+                               "processes which have no master-processes run "
+                               "in cluster"))

     def _validate_existing_ng_scaling(self, cluster, existing):
         scalable_processes = self._get_scalable_processes()
@@ -470,8 +473,8 @@ class VersionHandler(avm.AbstractVersionHandler):
                 dn_to_delete += ng.count - existing[ng.id]
             if not set(ng.node_processes).issubset(scalable_processes):
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Vanilla plugin cannot scale nodegroup"
-                             " with processes: " +
-                             ' '.join(ng.node_processes))
+                    ng.name, _("Vanilla plugin cannot scale nodegroup"
+                               " with processes: %s") %
+                    ' '.join(ng.node_processes))

         dn_amount = len(vu.get_datanodes(cluster))
@@ -480,6 +483,6 @@ class VersionHandler(avm.AbstractVersionHandler):
         if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
             raise ex.ClusterCannotBeScaled(
-                cluster.name, "Vanilla plugin cannot shrink cluster because "
-                              "it would be not enough nodes for replicas "
-                              "(replication factor is %s)" % rep_factor)
+                cluster.name, _("Vanilla plugin cannot shrink cluster because "
+                                "it would be not enough nodes for replicas "
+                                "(replication factor is %s)") % rep_factor)

View File

@@ -61,5 +61,5 @@ builtins = _
 exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools

 [hacking]
-import_exceptions = sahara.i18n._, sahara.i18n._LI, sahara.i18n._LW, sahara.i18n._LE
+import_exceptions = sahara.i18n._, sahara.i18n._LI, sahara.i18n._LW, sahara.i18n._LE, sahara.i18n._LC
 local-check-factory = sahara.utils.hacking.checks.factory
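The tox.ini change extends the hacking check's whitelist so the translation markers can be imported by name (builtins = _ likewise keeps flake8 from flagging the bare _ alias). The newly whitelisted _LC is the marker for critical-level log messages; the commit itself only whitelists the import, so the following usage is a hypothetical sketch:

    from sahara.i18n import _LC
    from sahara.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def abort(reason):
        # _LC marks a critical-level log message for translation; the value is
        # still passed separately for lazy interpolation.
        LOG.critical(_LC('Cannot continue provisioning: %s'), reason)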