diff --git a/sahara/plugins/base.py b/sahara/plugins/base.py index 3cc60fb129..c1ee03597f 100644 --- a/sahara/plugins/base.py +++ b/sahara/plugins/base.py @@ -20,6 +20,8 @@ import six from stevedore import enabled from sahara import exceptions as ex +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.utils import resources @@ -98,17 +100,18 @@ class PluginManager(object): for ext in extension_manager.extensions: if ext.name in self.plugins: raise ex.ConfigurationError( - "Plugin with name '%s' already exists." % ext.name) + _("Plugin with name '%s' already exists.") % ext.name) ext.obj.name = ext.name self.plugins[ext.name] = ext.obj - LOG.info("Plugin '%s' loaded (%s)" - % (ext.name, ext.entry_point_target)) + LOG.info(_LI("Plugin '%(plugin_name)s' loaded %(entry_point)s"), + {'plugin_name': ext.name, + 'entry_point': ext.entry_point_target}) if len(self.plugins) < len(config_plugins): loaded_plugins = set(six.iterkeys(self.plugins)) requested_plugins = set(config_plugins) raise ex.ConfigurationError( - "Plugins couldn't be loaded: %s" % + _("Plugins couldn't be loaded: %s") % ", ".join(requested_plugins - loaded_plugins)) def get_plugins(self, base): diff --git a/sahara/plugins/fake/plugin.py b/sahara/plugins/fake/plugin.py index 65f9519d78..971b9e61a7 100644 --- a/sahara/plugins/fake/plugin.py +++ b/sahara/plugins/fake/plugin.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from sahara.i18n import _ from sahara.plugins import provisioning as p @@ -22,9 +23,9 @@ class FakePluginProvider(p.ProvisioningPluginBase): return "Fake Plugin" def get_description(self): - return ("It's a fake plugin that aimed to work on the CirrOS images. " - "It doesn't install Hadoop. It's needed to be able to test " - "provisioning part of Sahara codebase itself.") + return _("It's a fake plugin that aimed to work on the CirrOS images. " + "It doesn't install Hadoop. It's needed to be able to test " + "provisioning part of Sahara codebase itself.") def get_versions(self): return ["0.1"] diff --git a/sahara/plugins/general/exceptions.py b/sahara/plugins/general/exceptions.py index 00485dfe6e..87a941d993 100644 --- a/sahara/plugins/general/exceptions.py +++ b/sahara/plugins/general/exceptions.py @@ -14,18 +14,19 @@ # limitations under the License. 
import sahara.exceptions as e +from sahara.i18n import _ class NodeGroupCannotBeScaled(e.SaharaException): def __init__(self, ng_name, reason): - self.message = ("Chosen node group %s cannot be scaled : " - "%s" % (ng_name, reason)) + self.message = _("Chosen node group %(ng_name)s cannot be scaled : " + "%(reason)s") % {"ng_name": ng_name, "reason": reason} self.code = "NODE_GROUP_CANNOT_BE_SCALED" class DecommissionError(e.SaharaException): code = "DECOMMISSION_ERROR" - message = "Failed to decommission cluster" + message = _("Failed to decommission cluster") def __init__(self, message=None): if message: @@ -34,8 +35,9 @@ class DecommissionError(e.SaharaException): class ClusterCannotBeScaled(e.SaharaException): def __init__(self, cluster_name, reason): - self.message = ("Cluster %s cannot be scaled : " - "%s" % (cluster_name, reason)) + self.message = _("Cluster %(cluster_name)s cannot be scaled : " + "%(reason)s") % {"cluster_name": cluster_name, + "reason": reason} self.code = "CLUSTER_CANNOT_BE_SCALED" @@ -43,11 +45,13 @@ class RequiredServiceMissingException(e.SaharaException): """Exception indicating that a required service has not been deployed.""" def __init__(self, service_name, required_by=None): - self.message = ('Cluster is missing a service: %s' + self.message = (_('Cluster is missing a service: %s') % service_name) if required_by: - self.message = ('%s, required by service: %s' - % (self.message, required_by)) + self.message = (_('%(message)s, required by service: ' + '%(required_by)s') + % {'message': self.message, + 'required_by': required_by}) self.code = 'MISSING_SERVICE' @@ -62,11 +66,17 @@ class InvalidComponentCountException(e.SaharaException): """ def __init__(self, component, expected_count, count, description=None): - self.message = ("Hadoop cluster should contain {0} {1} component(s)." - " Actual {1} count is {2}".format( - expected_count, component, count)) + message = _("Hadoop cluster should contain %(expected_count)s " + "%(component)s component(s)." + " Actual %(component)s count is %(count)s") + if description: - self.message += '. ' + description + message = ("%(message)s. %(description)s" + % {'message': message, 'description': description}) + + self.message = message % {"expected_count": expected_count, + "component": component, "count": count} + self.code = "INVALID_COMPONENT_COUNT" super(InvalidComponentCountException, self).__init__() @@ -78,7 +88,7 @@ class HadoopProvisionError(e.SaharaException): A message indicating the reason for failure must be provided. 
""" - base_message = "Failed to Provision Hadoop Cluster: %s" + base_message = _("Failed to Provision Hadoop Cluster: %s") def __init__(self, message): self.code = "HADOOP_PROVISION_FAILED" diff --git a/sahara/plugins/general/utils.py b/sahara/plugins/general/utils.py index f096458c1d..fe7a3e83f8 100644 --- a/sahara/plugins/general/utils.py +++ b/sahara/plugins/general/utils.py @@ -16,6 +16,7 @@ from oslo.utils import netutils from six.moves.urllib import parse as urlparse +from sahara.i18n import _ from sahara.plugins.general import exceptions as ex @@ -38,7 +39,7 @@ def get_instance(cluster, node_process): instances = get_instances(cluster, node_process) if len(instances) > 1: raise ex.InvalidComponentCountException( - node_process, '0 or 1', len(instances)) + node_process, _('0 or 1'), len(instances)) return instances[0] if instances else None diff --git a/sahara/plugins/hdp/ambariplugin.py b/sahara/plugins/hdp/ambariplugin.py index b24b82eb28..22488a4e72 100644 --- a/sahara/plugins/hdp/ambariplugin.py +++ b/sahara/plugins/hdp/ambariplugin.py @@ -17,6 +17,8 @@ from oslo.config import cfg from sahara import conductor from sahara import context +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils as u @@ -64,7 +66,7 @@ class AmbariPlugin(p.ProvisioningPluginBase): # enabled self._configure_topology_for_cluster(cluster, servers) - LOG.info("Install of Hadoop stack successful.") + LOG.info(_LI("Install of Hadoop stack successful.")) # add service urls self._set_cluster_info(cluster, cluster_spec) @@ -171,8 +173,8 @@ class AmbariPlugin(p.ProvisioningPluginBase): servers, version): # TODO(jspeidel): encapsulate in another class - LOG.info('Provisioning Cluster via Ambari Server: {0} ...'.format( - ambari_info.get_address())) + LOG.info(_LI('Provisioning Cluster via Ambari Server: {0} ...') + .format(ambari_info.get_address())) for server in servers: self._spawn( @@ -221,9 +223,9 @@ class AmbariPlugin(p.ProvisioningPluginBase): if not is_admin_provided: if admin_user is None: - raise ex.HadoopProvisionError("An Ambari user in the " - "admin group must be " - "configured.") + raise ex.HadoopProvisionError(_("An Ambari user in the" + " admin group must be " + "configured.")) ambari_info.user = admin_user ambari_info.password = admin_password ambari_client.delete_ambari_user('admin', ambari_info) @@ -240,7 +242,7 @@ class AmbariPlugin(p.ProvisioningPluginBase): ambari_info.user = admin_user.name ambari_info.password = admin_user.password - LOG.info('Using "{0}" as admin user for scaling of cluster' + LOG.info(_LI('Using "{0}" as admin user for scaling of cluster') .format(ambari_info.user)) # PLUGIN SPI METHODS: @@ -276,9 +278,9 @@ class AmbariPlugin(p.ProvisioningPluginBase): return 'Hortonworks Data Platform' def get_description(self): - return ('The Hortonworks OpenStack plugin works with project ' - 'Sahara to automate the deployment of the Hortonworks data' - ' platform on OpenStack based public & private clouds') + return _('The Hortonworks OpenStack plugin works with project ' + 'Sahara to automate the deployment of the Hortonworks data' + ' platform on OpenStack based public & private clouds') def validate(self, cluster): # creating operational config results in validation @@ -323,8 +325,8 @@ class AmbariPlugin(p.ProvisioningPluginBase): ambari_client.cleanup(ambari_info) def decommission_nodes(self, cluster, instances): - 
LOG.info('AmbariPlugin: decommission_nodes called for ' - 'HDP version = ' + cluster.hadoop_version) + LOG.info(_LI('AmbariPlugin: decommission_nodes called for ' + 'HDP version = %s'), cluster.hadoop_version) handler = self.version_factory.get_version_handler( cluster.hadoop_version) diff --git a/sahara/plugins/hdp/clusterspec.py b/sahara/plugins/hdp/clusterspec.py index 30182574cc..172947acbf 100644 --- a/sahara/plugins/hdp/clusterspec.py +++ b/sahara/plugins/hdp/clusterspec.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from sahara.i18n import _ from sahara.openstack.common import jsonutils as json from sahara.openstack.common import log as logging from sahara.plugins.general import exceptions as ex @@ -312,9 +313,9 @@ class NormalizedClusterConfig(): return 'boolean' else: raise ValueError( - "Could not determine property type for property '{0}' with " - "value: {1}". - format(prop, value)) + _("Could not determine property type for property " + "'%(property)s' with value: %(value)s") % + {"property": prop, "value": value}) class NormalizedConfig(): diff --git a/sahara/plugins/hdp/configprovider.py b/sahara/plugins/hdp/configprovider.py index e2abf89e84..3d40b9f532 100644 --- a/sahara/plugins/hdp/configprovider.py +++ b/sahara/plugins/hdp/configprovider.py @@ -14,6 +14,7 @@ # limitations under the License. from sahara import exceptions +from sahara.i18n import _ from sahara.plugins import provisioning as p @@ -61,8 +62,8 @@ class ConfigurationProvider: # internal error # ambari-config-resource contains duplicates raise exceptions.InvalidDataException( - 'Internal Error. Duplicate property ' - 'name detected: %s' % property_name) + _('Internal Error. Duplicate property ' + 'name detected: %s') % property_name) self.config_mapper[service_property['name']] = ( self._get_target( service_property['applicable_target'])) diff --git a/sahara/plugins/hdp/hadoopserver.py b/sahara/plugins/hdp/hadoopserver.py index 0bf22a31a3..51de53da2f 100644 --- a/sahara/plugins/hdp/hadoopserver.py +++ b/sahara/plugins/hdp/hadoopserver.py @@ -15,6 +15,8 @@ import re +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.plugins.general import exceptions as ex from sahara.plugins.hdp import saharautils @@ -64,7 +66,7 @@ class HadoopServer: @saharautils.inject_remote('r') def install_rpms(self, r): LOG.info( - "{0}: Installing rpm's ...".format(self.instance.hostname())) + _LI("{0}: Installing rpm's ...").format(self.instance.hostname())) # TODO(jspeidel): based on image type, use correct command curl_cmd = ('curl -f -s -o /etc/yum.repos.d/ambari.repo %s' % @@ -76,17 +78,17 @@ class HadoopServer: yum_cmd = 'yum -y install %s' % EPEL_RELEASE_PACKAGE_NAME r.execute_command(yum_cmd, run_as_root=True) else: - LOG.info("{0}: Unable to install rpm's from repo, " - "checking for local install." + LOG.info(_LI("{0}: Unable to install rpm's from repo, " + "checking for local install.") .format(self.instance.hostname())) if not self.rpms_installed(): raise ex.HadoopProvisionError( - 'Failed to install Hortonworks Ambari') + _('Failed to install Hortonworks Ambari')) @saharautils.inject_remote('r') def install_swift_integration(self, r): LOG.info( - "{0}: Installing swift integration ..." 
+ _LI("{0}: Installing swift integration ...") .format(self.instance.hostname())) base_rpm_cmd = 'rpm -U --quiet ' rpm_cmd = base_rpm_cmd + HADOOP_SWIFT_RPM @@ -94,8 +96,8 @@ class HadoopServer: run_as_root=True, raise_when_error=False) if ret_code != 0: - LOG.info("{0}: Unable to install swift integration from source, " - "checking for local rpm." + LOG.info(_LI("{0}: Unable to install swift integration from " + "source, checking for local rpm.") .format(self.instance.hostname())) ret_code, stdout = r.execute_command( 'ls ' + HADOOP_SWIFT_LOCAL_RPM, @@ -106,7 +108,7 @@ class HadoopServer: r.execute_command(rpm_cmd, run_as_root=True) else: raise ex.HadoopProvisionError( - 'Failed to install Hadoop Swift integration') + _('Failed to install Hadoop Swift integration')) @saharautils.inject_remote('r') def configure_topology(self, topology_str, r): @@ -121,11 +123,11 @@ class HadoopServer: @saharautils.inject_remote('r') def _setup_and_start_ambari_server(self, port, jdk_path, r): - LOG.info('{0}: Installing ambari-server ...'.format( + LOG.info(_LI('{0}: Installing ambari-server ...').format( self.instance.hostname())) r.execute_command('yum -y install ambari-server', run_as_root=True) - LOG.info('Running Ambari Server setup ...') + LOG.info(_LI('Running Ambari Server setup ...')) # remove postgres data directory as a precaution since its existence # has prevented successful postgres installation r.execute_command('rm -rf /var/lib/pgsql/data', run_as_root=True) @@ -151,7 +153,7 @@ class HadoopServer: self._configure_ambari_server_api_port(port) - LOG.info('Starting Ambari ...') + LOG.info(_LI('Starting Ambari ...')) # NOTE(dmitryme): Reading stdout from 'ambari-server start' # hangs ssh. Redirecting output to /dev/null fixes that r.execute_command( @@ -175,7 +177,7 @@ class HadoopServer: @saharautils.inject_remote('r') def _setup_and_start_ambari_agent(self, ambari_server_ip, r): - LOG.info('{0}: Installing Ambari Agent ...'.format( + LOG.info(_LI('{0}: Installing Ambari Agent ...').format( self.instance.hostname())) r.execute_command('yum -y install ambari-agent', run_as_root=True) @@ -187,7 +189,8 @@ class HadoopServer: ambari_server_ip) LOG.info( - '{0}: Starting Ambari Agent ...'.format(self.instance.hostname())) + _LI('{0}: Starting Ambari Agent ...').format( + self.instance.hostname())) # If the HDP 2 ambari agent is pre-installed on an image, the agent # will start up during instance launch and therefore the agent # registration will fail. 
It is therefore more appropriate to call diff --git a/sahara/plugins/hdp/versions/version_1_3_2/services.py b/sahara/plugins/hdp/versions/version_1_3_2/services.py index d4ef441a73..616c7e6afe 100644 --- a/sahara/plugins/hdp/versions/version_1_3_2/services.py +++ b/sahara/plugins/hdp/versions/version_1_3_2/services.py @@ -20,6 +20,7 @@ import six from sahara import exceptions as e +from sahara.i18n import _ from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils from sahara.swift import swift_helper as h @@ -518,8 +519,8 @@ class HBaseService(Service): configurations['global']['hbase_hdfs_root_dir'] = match.group(3) else: raise e.InvalidDataException( - "Invalid value for property 'hbase-site/hbase.rootdir' : %s" % - user_input.value) + _("Invalid value for property 'hbase-site/hbase.rootdir' : %s") + % user_input.value) def finalize_configuration(self, cluster_spec): nn_servers = cluster_spec.determine_component_hosts('NAMENODE') diff --git a/sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py b/sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py index cbfb333e51..fa556601b3 100644 --- a/sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py +++ b/sahara/plugins/hdp/versions/version_1_3_2/versionhandler.py @@ -22,7 +22,11 @@ import requests from sahara import context from sahara import exceptions as exc +from sahara.i18n import _ +from sahara.i18n import _LC +from sahara.i18n import _LE from sahara.i18n import _LI +from sahara.i18n import _LW from sahara.plugins.general import exceptions as ex from sahara.plugins.hdp import clusterspec as cs from sahara.plugins.hdp import configprovider as cfgprov @@ -166,9 +170,9 @@ class AmbariClient(): self.handler.get_version() + '"}}') if result.status_code != 201: - LOG.error('Create cluster command failed. %s' % result.text) + LOG.error(_LE('Create cluster command failed. %s') % result.text) raise ex.HadoopProvisionError( - 'Failed to add cluster: %s' % result.text) + _('Failed to add cluster: %s') % result.text) def _add_configurations_to_cluster( self, cluster_spec, ambari_info, name): @@ -214,10 +218,10 @@ class AmbariClient(): result = self._put(config_url, ambari_info, data=json.dumps(body)) if result.status_code != 200: LOG.error( - 'Set configuration command failed. {0}'.format( + _LE('Set configuration command failed. {0}').format( result.text)) raise ex.HadoopProvisionError( - 'Failed to set configurations on cluster: %s' + _('Failed to set configurations on cluster: %s') % result.text) def _add_services_to_cluster(self, cluster_spec, ambari_info, name): @@ -230,10 +234,11 @@ class AmbariClient(): ambari_info) if result.status_code not in [201, 409]: LOG.error( - 'Create service command failed. {0}'.format( + _LE('Create service command failed. {0}').format( result.text)) raise ex.HadoopProvisionError( - 'Failed to add services to cluster: %s' % result.text) + _('Failed to add services to cluster: %s') + % result.text) def _add_components_to_services(self, cluster_spec, ambari_info, name): add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{' @@ -247,10 +252,10 @@ class AmbariClient(): ambari_info) if result.status_code not in [201, 409]: LOG.error( - 'Create component command failed. {0}'.format( + _LE('Create component command failed. 
{0}').format( result.text)) raise ex.HadoopProvisionError( - 'Failed to add components to services: %s' + _('Failed to add components to services: %s') % result.text) def _add_hosts_and_components( @@ -266,9 +271,9 @@ class AmbariClient(): ambari_info) if result.status_code != 201: LOG.error( - 'Create host command failed. {0}'.format(result.text)) + _LE('Create host command failed. {0}').format(result.text)) raise ex.HadoopProvisionError( - 'Failed to add host: %s' % result.text) + _('Failed to add host: %s') % result.text) node_group_name = host.node_group.name # TODO(jspeidel): ensure that node group exists @@ -281,13 +286,14 @@ class AmbariClient(): ambari_info) if result.status_code != 201: LOG.error( - 'Create host_component command failed. %s' % + _LE('Create host_component command failed. %s'), result.text) raise ex.HadoopProvisionError( - 'Failed to add host component: %s' % result.text) + _('Failed to add host component: %s') + % result.text) def _install_services(self, cluster_name, ambari_info): - LOG.info('Installing required Hadoop services ...') + LOG.info(_LI('Installing required Hadoop services ...')) ambari_address = ambari_info.get_address() install_url = ('http://{0}/api/v1/clusters/{' @@ -305,17 +311,17 @@ class AmbariClient(): ambari_info, cluster_name, request_id), ambari_info) if success: - LOG.info("Install of Hadoop stack successful.") + LOG.info(_LI("Install of Hadoop stack successful.")) self._finalize_ambari_state(ambari_info) else: - LOG.critical('Install command failed.') + LOG.critical(_LC('Install command failed.')) raise ex.HadoopProvisionError( - 'Installation of Hadoop stack failed.') + _('Installation of Hadoop stack failed.')) elif result.status_code != 200: LOG.error( - 'Install command failed. {0}'.format(result.text)) + _LE('Install command failed. {0}').format(result.text)) raise ex.HadoopProvisionError( - 'Installation of Hadoop stack failed.') + _('Installation of Hadoop stack failed.')) def _get_async_request_uri(self, ambari_info, cluster_name, request_id): return ('http://{0}/api/v1/clusters/{1}/requests/{' @@ -343,7 +349,7 @@ class AmbariClient(): return started def _finalize_ambari_state(self, ambari_info): - LOG.info('Finalizing Ambari cluster state.') + LOG.info(_LI('Finalizing Ambari cluster state.')) persist_state_uri = 'http://{0}/api/v1/persist'.format( ambari_info.get_address()) @@ -354,14 +360,17 @@ class AmbariClient(): result = self._post(persist_state_uri, ambari_info, data=persist_data) if result.status_code != 201 and result.status_code != 202: - LOG.warning('Finalizing of Ambari cluster state failed. {0}'. + LOG.warning(_LW('Finalizing of Ambari cluster state failed. {0}'). 
format(result.text)) - raise ex.HadoopProvisionError('Unable to finalize Ambari state.') + raise ex.HadoopProvisionError(_('Unable to finalize Ambari ' + 'state.')) def start_services(self, cluster_name, cluster_spec, ambari_info): - LOG.info('Starting Hadoop services ...') - LOG.info('Cluster name: {0}, Ambari server address: {1}' - .format(cluster_name, ambari_info.get_address())) + LOG.info(_LI('Starting Hadoop services ...')) + LOG.info(_LI('Cluster name: %(cluster_name)s, Ambari server address: ' + '%(server_address)s'), + {'cluster_name': cluster_name, + 'server_address': ambari_info.get_address()}) start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/' 'state=INSTALLED'.format( ambari_info.get_address(), cluster_name)) @@ -379,19 +388,20 @@ class AmbariClient(): request_id), ambari_info) if success: LOG.info( - "Successfully started Hadoop cluster '{0}'.".format( + _LI("Successfully started Hadoop cluster '{0}'.").format( cluster_name)) else: - LOG.critical('Failed to start Hadoop cluster.') + LOG.critical(_LC('Failed to start Hadoop cluster.')) raise ex.HadoopProvisionError( - 'Start of Hadoop services failed.') + _('Start of Hadoop services failed.')) elif result.status_code != 200: LOG.error( - 'Start command failed. Status: {0}, response: {1}'. - format(result.status_code, result.text)) + _LE('Start command failed. Status: %(status)s, ' + 'response: %(response)s'), + {'status': result.status_code, 'response': result.text}) raise ex.HadoopProvisionError( - 'Start of Hadoop services failed.') + _('Start of Hadoop services failed.')) def _exec_ambari_command(self, ambari_info, body, cmd_uri): @@ -405,18 +415,19 @@ class AmbariClient(): success = self._wait_for_async_request(href, ambari_info) if success: LOG.info( - "Successfully changed state of Hadoop components ") + _LI("Successfully changed state of Hadoop components ")) else: - LOG.critical('Failed to change state of Hadoop ' - 'components') + LOG.critical(_LC('Failed to change state of Hadoop ' + 'components')) raise ex.HadoopProvisionError( - 'Failed to change state of Hadoop components') + _('Failed to change state of Hadoop components')) else: LOG.error( - 'Command failed. Status: {0}, response: {1}'. - format(result.status_code, result.text)) - raise ex.HadoopProvisionError('Hadoop/Ambari command failed.') + _LE('Command failed. Status: %(status)s, response: ' + '%(response)s'), + {'status': result.status_code, 'response': result.text}) + raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.')) def _get_host_list(self, servers): host_list = [server.instance.fqdn().lower() for server in servers] @@ -432,9 +443,10 @@ class AmbariClient(): servers, cluster_spec) def _install_components(self, ambari_info, auth, cluster_name, servers): - LOG.info('Starting Hadoop components while scaling up') - LOG.info('Cluster name {0}, Ambari server ip {1}' - .format(cluster_name, ambari_info.get_address())) + LOG.info(_LI('Starting Hadoop components while scaling up')) + LOG.info(_LI('Cluster name %(cluster_name)s, Ambari server ip %(ip)s'), + {'cluster_name': cluster_name, + 'ip': ambari_info.get_address()}) # query for the host components on the given hosts that are in the # INIT state # TODO(jspeidel): provide request context @@ -482,13 +494,13 @@ class AmbariClient(): self._exec_ambari_command(ambari_info, body, start_uri) else: raise ex.HadoopProvisionError( - 'Unable to determine installed service ' - 'components in scaled instances. 
status' - ' code returned = {0}'.format(result.status)) + _('Unable to determine installed service ' + 'components in scaled instances. status' + ' code returned = {0}').format(result.status)) def wait_for_host_registrations(self, num_hosts, ambari_info): LOG.info( - 'Waiting for all Ambari agents to register with server ...') + _LI('Waiting for all Ambari agents to register with server ...')) url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address()) result = None @@ -501,14 +513,16 @@ class AmbariClient(): result = self._get(url, ambari_info) json_result = json.loads(result.text) - LOG.info('Registered Hosts: {0} of {1}'.format( - len(json_result['items']), num_hosts)) + LOG.info(_LI('Registered Hosts: %(current_number)s of ' + '%(final_number)s'), + {'current_number': len(json_result['items']), + 'final_number': num_hosts}) for hosts in json_result['items']: LOG.debug('Registered Host: {0}'.format( hosts['Hosts']['host_name'])) except requests.ConnectionError: # TODO(jspeidel): max wait time - LOG.info('Waiting to connect to ambari server ...') + LOG.info(_LI('Waiting to connect to ambari server ...')) def update_ambari_admin_user(self, password, ambari_info): old_pwd = ambari_info.password @@ -520,9 +534,9 @@ class AmbariClient(): result = self._put(user_url, ambari_info, data=update_body) if result.status_code != 200: - raise ex.HadoopProvisionError('Unable to update Ambari admin user' - ' credentials: {0}'.format( - result.text)) + raise ex.HadoopProvisionError(_('Unable to update Ambari admin ' + 'user credentials: {0}').format( + result.text)) def add_ambari_user(self, user, ambari_info): user_url = 'http://{0}/api/v1/users/{1}'.format( @@ -536,7 +550,7 @@ class AmbariClient(): if result.status_code != 201: raise ex.HadoopProvisionError( - 'Unable to create Ambari user: {0}'.format(result.text)) + _('Unable to create Ambari user: {0}').format(result.text)) def delete_ambari_user(self, user_name, ambari_info): user_url = 'http://{0}/api/v1/users/{1}'.format( @@ -546,8 +560,9 @@ class AmbariClient(): if result.status_code != 200: raise ex.HadoopProvisionError( - 'Unable to delete Ambari user: {0}' - ' : {1}'.format(user_name, result.text)) + _('Unable to delete Ambari user: %(user_name)s' + ' : %(text)s') % + {'user_name': user_name, 'text': result.text}) def configure_scaled_cluster_instances(self, name, cluster_spec, num_hosts, ambari_info): @@ -570,9 +585,9 @@ class AmbariClient(): def decommission_cluster_instances(self, cluster, clusterspec, instances, ambari_info): - raise exc.InvalidException('The HDP plugin does not support ' - 'the decommissioning of nodes ' - 'for HDP version 1.3.2') + raise exc.InvalidException(_('The HDP plugin does not support ' + 'the decommissioning of nodes ' + 'for HDP version 1.3.2')) def provision_cluster(self, cluster_spec, servers, ambari_info, name): self._add_cluster(ambari_info, name) diff --git a/sahara/plugins/hdp/versions/version_2_0_6/services.py b/sahara/plugins/hdp/versions/version_2_0_6/services.py index fd80e5c0ce..81d47f06ac 100644 --- a/sahara/plugins/hdp/versions/version_2_0_6/services.py +++ b/sahara/plugins/hdp/versions/version_2_0_6/services.py @@ -19,6 +19,9 @@ from oslo.config import cfg import six from sahara import exceptions as e +from sahara.i18n import _ +from sahara.i18n import _LI +from sahara.i18n import _LW from sahara.openstack.common import log as logging from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils @@ -568,8 +571,8 @@ class HBaseService(Service): 
configurations['global']['hbase_hdfs_root_dir'] = match.group(3) else: raise e.InvalidDataException( - "Invalid value for property 'hbase-site/hbase.rootdir' : %s" % - user_input.value) + _("Invalid value for property 'hbase-site/hbase.rootdir' : %s") + % user_input.value) def finalize_configuration(self, cluster_spec): nn_servers = cluster_spec.determine_component_hosts('NAMENODE') @@ -910,16 +913,16 @@ class HueService(Service): def _create_hue_property_tree(cluster_spec): config_name = 'hue-ini' - LOG.info('Creating Hue ini property tree from configuration named ' - '{0}'.format(config_name)) + LOG.info(_LI('Creating Hue ini property tree from configuration named ' + '{0}').format(config_name)) hue_ini_property_tree = {'sections': {}, 'properties': {}} config = cluster_spec.configurations[config_name] if config is None: - LOG.warning('Missing configuration named {0}, aborting Hue ini ' - 'file creation'.format(config_name)) + LOG.warning(_LW('Missing configuration named {0}, aborting Hue ini' + ' file creation').format(config_name)) else: # replace values in hue-ini configuration subs = {} @@ -1010,18 +1013,19 @@ class HueService(Service): @staticmethod def _merge_configurations(cluster_spec, src_config_name, dst_config_name): - LOG.info('Merging configuration properties: {0} -> {1}' - .format(src_config_name, dst_config_name)) + LOG.info(_LI('Merging configuration properties: %(source)s -> ' + '%(destination)s'), + {'source': src_config_name, 'destination': dst_config_name}) src_config = cluster_spec.configurations[src_config_name] dst_config = cluster_spec.configurations[dst_config_name] if src_config is None: - LOG.warning('Missing source configuration property set, aborting ' - 'merge: {0}'.format(src_config_name)) + LOG.warning(_LW('Missing source configuration property set, ' + 'aborting merge: {0}').format(src_config_name)) elif dst_config is None: - LOG.warning('Missing destination configuration property set, ' - 'aborting merge: {0}'.format(dst_config_name)) + LOG.warning(_LW('Missing destination configuration property set, ' + 'aborting merge: {0}').format(dst_config_name)) else: for property_name, property_value in six.iteritems(src_config): if property_name in dst_config: @@ -1031,14 +1035,16 @@ class HueService(Service): src_config_name, property_name)) else: - LOG.warning('Overwriting existing configuration ' - 'property in {0} from {1} for Hue: {2} ' - '[{3} -> {4}]' - .format(dst_config_name, - src_config_name, - property_name, - dst_config[property_name], - src_config[property_name])) + LOG.warning(_LW('Overwriting existing configuration ' + 'property in %(dst_config_name)s from ' + '%(src_config_name)s for Hue: ' + '%(property_name)s ' + '[%(dst_config)s -> %(src_config)s]'), + {'dst_config_name': dst_config_name, + 'src_config_name': src_config_name, + 'property_name': property_name, + 'dst_config': dst_config[property_name], + 'src_config': src_config[property_name]}) else: LOG.debug('Adding Hue configuration property to {0} from ' '{1}: {2}'.format(dst_config_name, @@ -1051,19 +1057,19 @@ class HueService(Service): def _handle_pre_service_start(instance, cluster_spec, hue_ini, create_user): with instance.remote() as r: - LOG.info('Installing Hue on {0}' + LOG.info(_LI('Installing Hue on {0}') .format(instance.fqdn())) r.execute_command('yum -y install hue', run_as_root=True) - LOG.info('Setting Hue configuration on {0}' + LOG.info(_LI('Setting Hue configuration on {0}') .format(instance.fqdn())) r.write_file_to('/etc/hue/conf/hue.ini', hue_ini, True) - 
LOG.info('Uninstalling Shell, if it is installed ' - 'on {0}'.format(instance.fqdn())) + LOG.info(_LI('Uninstalling Shell, if it is installed ' + 'on {0}').format(instance.fqdn())) r.execute_command( '/usr/lib/hue/build/env/bin/python ' '/usr/lib/hue/tools/app_reg/app_reg.py ' @@ -1071,12 +1077,12 @@ class HueService(Service): run_as_root=True) if create_user: - LOG.info('Creating initial Hue user on {0}' + LOG.info(_LI('Creating initial Hue user on {0}') .format(instance.fqdn())) r.execute_command('/usr/lib/hue/build/env/bin/hue ' 'create_sandbox_user', run_as_root=True) - LOG.info('(Re)starting Hue on {0}' + LOG.info(_LI('(Re)starting Hue on {0}') .format(instance.fqdn())) java_home = HueService._get_java_home(cluster_spec) @@ -1167,15 +1173,15 @@ class HueService(Service): components = hue_ng.components if 'HDFS_CLIENT' not in components: - LOG.info('Missing HDFS client from Hue node... adding it ' - 'since it is required for Hue') + LOG.info(_LI('Missing HDFS client from Hue node... adding ' + 'it since it is required for Hue')) components.append('HDFS_CLIENT') if cluster_spec.get_deployed_node_group_count('HIVE_SERVER'): if 'HIVE_CLIENT' not in components: - LOG.info('Missing HIVE client from Hue node... adding ' - 'it since it is required for Beeswax and ' - 'HCatalog') + LOG.info(_LI('Missing HIVE client from Hue node... ' + 'adding it since it is required for ' + 'Beeswax and HCatalog')) components.append('HIVE_CLIENT') def pre_service_start(self, cluster_spec, ambari_info, started_services): diff --git a/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py b/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py index db0250a2f9..9f8bd7b039 100644 --- a/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py +++ b/sahara/plugins/hdp/versions/version_2_0_6/versionhandler.py @@ -22,7 +22,11 @@ import requests from sahara import context from sahara import exceptions as exc +from sahara.i18n import _ +from sahara.i18n import _LC +from sahara.i18n import _LE from sahara.i18n import _LI +from sahara.i18n import _LW from sahara.plugins.general import exceptions as ex from sahara.plugins.hdp import clusterspec as cs from sahara.plugins.hdp import configprovider as cfgprov @@ -151,9 +155,9 @@ class AmbariClient(): self.handler.get_version() + '"}}') if result.status_code != 201: - LOG.error('Create cluster command failed. %s' % result.text) + LOG.error(_LE('Create cluster command failed. %s'), result.text) raise ex.HadoopProvisionError( - 'Failed to add cluster: %s' % result.text) + _('Failed to add cluster: %s') % result.text) def _add_configurations_to_cluster( self, cluster_spec, ambari_info, name): @@ -199,10 +203,10 @@ class AmbariClient(): result = self._put(config_url, ambari_info, data=json.dumps(body)) if result.status_code != 200: LOG.error( - 'Set configuration command failed. {0}'.format( + _LE('Set configuration command failed. {0}').format( result.text)) raise ex.HadoopProvisionError( - 'Failed to set configurations on cluster: %s' + _('Failed to set configurations on cluster: %s') % result.text) def _add_services_to_cluster(self, cluster_spec, ambari_info, name): @@ -216,10 +220,11 @@ class AmbariClient(): ambari_info) if result.status_code not in [201, 409]: LOG.error( - 'Create service command failed. {0}'.format( + _LE('Create service command failed. 
{0}').format( result.text)) raise ex.HadoopProvisionError( - 'Failed to add services to cluster: %s' % result.text) + _('Failed to add services to cluster: %s') + % result.text) def _add_components_to_services(self, cluster_spec, ambari_info, name): add_component_url = ('http://{0}/api/v1/clusters/{1}/services/{' @@ -234,10 +239,10 @@ class AmbariClient(): ambari_info) if result.status_code not in [201, 409]: LOG.error( - 'Create component command failed. {0}'.format( + _LE('Create component command failed. {0}').format( result.text)) raise ex.HadoopProvisionError( - 'Failed to add components to services: %s' + _('Failed to add components to services: %s') % result.text) def _add_hosts_and_components( @@ -253,9 +258,9 @@ class AmbariClient(): ambari_info) if result.status_code != 201: LOG.error( - 'Create host command failed. {0}'.format(result.text)) + _LE('Create host command failed. {0}').format(result.text)) raise ex.HadoopProvisionError( - 'Failed to add host: %s' % result.text) + _('Failed to add host: %s') % result.text) node_group_name = host.node_group.name # TODO(jspeidel): ensure that node group exists @@ -271,13 +276,14 @@ class AmbariClient(): ambari_info) if result.status_code != 201: LOG.error( - 'Create host_component command failed. %s' % + _LE('Create host_component command failed. %s'), result.text) raise ex.HadoopProvisionError( - 'Failed to add host component: %s' % result.text) + _('Failed to add host component: %s') + % result.text) def _install_services(self, cluster_name, ambari_info): - LOG.info('Installing required Hadoop services ...') + LOG.info(_LI('Installing required Hadoop services ...')) ambari_address = ambari_info.get_address() install_url = ('http://{0}/api/v1/clusters/{' @@ -295,17 +301,17 @@ class AmbariClient(): ambari_info, cluster_name, request_id), ambari_info) if success: - LOG.info("Install of Hadoop stack successful.") + LOG.info(_LI("Install of Hadoop stack successful.")) self._finalize_ambari_state(ambari_info) else: - LOG.critical('Install command failed.') + LOG.critical(_LC('Install command failed.')) raise ex.HadoopProvisionError( - 'Installation of Hadoop stack failed.') + _('Installation of Hadoop stack failed.')) elif result.status_code != 200: LOG.error( - 'Install command failed. {0}'.format(result.text)) + _LE('Install command failed. {0}').format(result.text)) raise ex.HadoopProvisionError( - 'Installation of Hadoop stack failed.') + _('Installation of Hadoop stack failed.')) def _get_async_request_uri(self, ambari_info, cluster_name, request_id): return ('http://{0}/api/v1/clusters/{1}/requests/{' @@ -338,7 +344,7 @@ class AmbariClient(): return started def _finalize_ambari_state(self, ambari_info): - LOG.info('Finalizing Ambari cluster state.') + LOG.info(_LI('Finalizing Ambari cluster state.')) persist_state_uri = 'http://{0}/api/v1/persist'.format( ambari_info.get_address()) @@ -349,14 +355,17 @@ class AmbariClient(): result = self._post(persist_state_uri, ambari_info, data=persist_data) if result.status_code != 201 and result.status_code != 202: - LOG.warning('Finalizing of Ambari cluster state failed. {0}'. + LOG.warning(_LW('Finalizing of Ambari cluster state failed. {0}'). 
format(result.text)) - raise ex.HadoopProvisionError('Unable to finalize Ambari state.') + raise ex.HadoopProvisionError( + _('Unable to finalize Ambari state.')) def start_services(self, cluster_name, cluster_spec, ambari_info): - LOG.info('Starting Hadoop services ...') - LOG.info('Cluster name: {0}, Ambari server address: {1}' - .format(cluster_name, ambari_info.get_address())) + LOG.info(_LI('Starting Hadoop services ...')) + LOG.info(_LI('Cluster name: %(cluster_name)s, Ambari server address:' + ' %(server_address)s'), + {'cluster_name': cluster_name, + 'server_address': ambari_info.get_address()}) start_url = ('http://{0}/api/v1/clusters/{1}/services?ServiceInfo/' 'state=INSTALLED'.format( ambari_info.get_address(), cluster_name)) @@ -374,19 +383,20 @@ class AmbariClient(): request_id), ambari_info) if success: LOG.info( - "Successfully started Hadoop cluster '{0}'.".format( + _LI("Successfully started Hadoop cluster '{0}'.").format( cluster_name)) else: - LOG.critical('Failed to start Hadoop cluster.') + LOG.critical(_LC('Failed to start Hadoop cluster.')) raise ex.HadoopProvisionError( - 'Start of Hadoop services failed.') + _('Start of Hadoop services failed.')) elif result.status_code != 200: LOG.error( - 'Start command failed. Status: {0}, response: {1}'. - format(result.status_code, result.text)) + _LE('Start command failed. Status: %(status)s, response: ' + '%(response)s'), + {'status': result.status_code, 'result': result.text}) raise ex.HadoopProvisionError( - 'Start of Hadoop services failed.') + _('Start of Hadoop services failed.')) def _exec_ambari_command(self, ambari_info, body, cmd_uri): @@ -400,18 +410,19 @@ class AmbariClient(): success = self._wait_for_async_request(href, ambari_info) if success: LOG.info( - "Successfully changed state of Hadoop components ") + _LI("Successfully changed state of Hadoop components ")) else: - LOG.critical('Failed to change state of Hadoop ' - 'components') + LOG.critical(_LC('Failed to change state of Hadoop ' + 'components')) raise ex.HadoopProvisionError( - 'Failed to change state of Hadoop components') + _('Failed to change state of Hadoop components')) else: LOG.error( - 'Command failed. Status: {0}, response: {1}'. - format(result.status_code, result.text)) - raise ex.HadoopProvisionError('Hadoop/Ambari command failed.') + _LE('Command failed. Status: %(status)s, response: ' + '%(response)s'), + {'status': result.status_code, 'result': result.text}) + raise ex.HadoopProvisionError(_('Hadoop/Ambari command failed.')) def _get_host_list(self, servers): host_list = [server.instance.fqdn().lower() for server in servers] @@ -427,9 +438,10 @@ class AmbariClient(): servers, cluster_spec) def _install_components(self, ambari_info, auth, cluster_name, servers): - LOG.info('Starting Hadoop components while scaling up') - LOG.info('Cluster name {0}, Ambari server ip {1}' - .format(cluster_name, ambari_info.get_address())) + LOG.info(_LI('Starting Hadoop components while scaling up')) + LOG.info(_LI('Cluster name %(cluster_name)s, Ambari server ip %(ip)s'), + {'cluster_name': cluster_name, + 'ip': ambari_info.get_address()}) # query for the host components on the given hosts that are in the # INIT state # TODO(jspeidel): provide request context @@ -477,13 +489,13 @@ class AmbariClient(): self._exec_ambari_command(ambari_info, body, start_uri) else: raise ex.HadoopProvisionError( - 'Unable to determine installed service ' - 'components in scaled instances. 
status' - ' code returned = {0}'.format(result.status)) + _('Unable to determine installed service ' + 'components in scaled instances. status' + ' code returned = {0}').format(result.status)) def wait_for_host_registrations(self, num_hosts, ambari_info): LOG.info( - 'Waiting for all Ambari agents to register with server ...') + _LI('Waiting for all Ambari agents to register with server ...')) url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address()) result = None @@ -496,14 +508,16 @@ class AmbariClient(): result = self._get(url, ambari_info) json_result = json.loads(result.text) - LOG.info('Registered Hosts: {0} of {1}'.format( - len(json_result['items']), num_hosts)) + LOG.info(_LI('Registered Hosts: %(current_number)s ' + 'of %(final_number)s'), + {'current_number': len(json_result['items']), + 'final_number': num_hosts}) for hosts in json_result['items']: LOG.debug('Registered Host: {0}'.format( hosts['Hosts']['host_name'])) except requests.ConnectionError: # TODO(jspeidel): max wait time - LOG.info('Waiting to connect to ambari server ...') + LOG.info(_LI('Waiting to connect to ambari server ...')) def update_ambari_admin_user(self, password, ambari_info): old_pwd = ambari_info.password @@ -515,9 +529,9 @@ class AmbariClient(): result = self._put(user_url, ambari_info, data=update_body) if result.status_code != 200: - raise ex.HadoopProvisionError('Unable to update Ambari admin user' - ' credentials: {0}'.format( - result.text)) + raise ex.HadoopProvisionError(_('Unable to update Ambari admin ' + 'user credentials: {0}').format( + result.text)) def add_ambari_user(self, user, ambari_info): user_url = 'http://{0}/api/v1/users/{1}'.format( @@ -531,7 +545,7 @@ class AmbariClient(): if result.status_code != 201: raise ex.HadoopProvisionError( - 'Unable to create Ambari user: {0}'.format(result.text)) + _('Unable to create Ambari user: {0}').format(result.text)) def delete_ambari_user(self, user_name, ambari_info): user_url = 'http://{0}/api/v1/users/{1}'.format( @@ -541,8 +555,9 @@ class AmbariClient(): if result.status_code != 200: raise ex.HadoopProvisionError( - 'Unable to delete Ambari user: {0}' - ' : {1}'.format(user_name, result.text)) + _('Unable to delete Ambari user: %(user_name)s' + ' : %(text)s') % + {'user_name': user_name, 'text': result.text}) def configure_scaled_cluster_instances(self, name, cluster_spec, num_hosts, ambari_info): @@ -609,17 +624,17 @@ class AmbariClient(): # ask Ambari to decommission the datanodes result = self._post(request_uri, ambari_info, request_body) if result.status_code != 202: - LOG.error('AmbariClient: error while making decommision post ' + - 'request. Error is = ' + result.text) + LOG.error(_LE('AmbariClient: error while making decommision post ' + 'request. Error is = %s'), result.text) raise exc.InvalidException( - 'An error occurred while trying to ' + - 'decommission the DataNode instances that are ' + - 'being shut down. ' + - 'Please consult the Ambari server logs on the ' + - 'master node for ' + - 'more information about the failure.') + _('An error occurred while trying to ' + 'decommission the DataNode instances that are ' + 'being shut down. 
' + 'Please consult the Ambari server logs on the ' + 'master node for ' + 'more information about the failure.')) else: - LOG.info('AmbariClient: decommission post request succeeded!') + LOG.info(_LI('AmbariClient: decommission post request succeeded!')) status_template = ('http://{0}/api/v1/clusters/{1}/hosts/{2}/' 'host_components/{3}') @@ -637,17 +652,17 @@ class AmbariClient(): count = 0 while count < 100 and len(hosts_to_decommission) > 0: - LOG.info('AmbariClient: number of hosts waiting for ' + - 'decommisioning to complete = ' + + LOG.info(_LI('AmbariClient: number of hosts waiting for ' + 'decommisioning to complete = %s'), str(len(hosts_to_decommission))) result = self._get(status_request, ambari_info) if result.status_code != 200: - LOG.error('AmbariClient: error in making decomission status ' + - 'request, error = ' + result.text) + LOG.error(_LE('AmbariClient: error in making decomission ' + 'status request, error = %s'), result.text) else: - LOG.info('AmbariClient: decommission status request ok, ' + - 'result = ' + result.text) + LOG.info(_LI('AmbariClient: decommission status request ok, ' + 'result = %s'), result.text) json_result = json.loads(result.text) live_nodes = ( json_result['metrics']['dfs']['namenode']['LiveNodes']) @@ -656,22 +671,24 @@ class AmbariClient(): for node in json_result_nodes.keys(): admin_state = json_result_nodes[node]['adminState'] if admin_state == 'Decommissioned': - LOG.info('AmbariClient: node = ' + node + - ' is now in adminState = ' + - admin_state) + LOG.info(_LI('AmbariClient: node = %(node)s is ' + 'now in adminState = %(admin_state)s'), + {'node': node, + 'admin_state': admin_state}) # remove from list, to track which nodes # are now in Decommissioned state hosts_to_decommission.remove(node) - LOG.info('AmbariClient: sleeping for 5 seconds') + LOG.info(_LI('AmbariClient: sleeping for 5 seconds')) context.sleep(5) # increment loop counter count += 1 if len(hosts_to_decommission) > 0: - LOG.error('AmbariClient: decommissioning process timed-out ' + - 'waiting for nodes to enter "Decommissioned" status.') + LOG.error(_LE('AmbariClient: decommissioning process timed-out ' + 'waiting for nodes to enter "Decommissioned" ' + 'status.')) def provision_cluster(self, cluster_spec, servers, ambari_info, name): self._add_cluster(ambari_info, name) diff --git a/sahara/plugins/provisioning.py b/sahara/plugins/provisioning.py index e45420792d..e8e48063f2 100644 --- a/sahara/plugins/provisioning.py +++ b/sahara/plugins/provisioning.py @@ -15,6 +15,7 @@ from sahara import exceptions as ex +from sahara.i18n import _ from sahara.plugins import base as plugins_base from sahara.utils import resources @@ -153,13 +154,17 @@ class ProvisioningPluginBase(plugins_base.PluginInterface): confs = config_objs_map.get(applicable_target) if not confs: raise ex.ConfigurationError( - "Can't find applicable target '%s' for '%s'" - % (applicable_target, config_name)) + _("Can't find applicable target " + "'%(applicable_target)s' for '%(config_name)s'") + % {"applicable_target": applicable_target, + "config_name": config_name}) conf = confs.get(config_name) if not conf: raise ex.ConfigurationError( - "Can't find config '%s' in '%s'" - % (config_name, applicable_target)) + _("Can't find config '%(config_name)s' " + "in '%(applicable_target)s'") + % {"config_name": config_name, + "applicable_target": applicable_target}) result.append(UserInput( conf, configs[applicable_target][config_name])) diff --git a/sahara/plugins/spark/config_helper.py 
b/sahara/plugins/spark/config_helper.py index 92444a8d23..656cb97c93 100644 --- a/sahara/plugins/spark/config_helper.py +++ b/sahara/plugins/spark/config_helper.py @@ -16,6 +16,8 @@ from oslo.config import cfg from sahara import conductor as c +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.plugins.general import utils from sahara.plugins import provisioning as p @@ -202,8 +204,9 @@ def get_config_value(service, name, cluster=None): if configs.applicable_target == service and configs.name == name: return configs.default_value - raise RuntimeError("Unable to get parameter '%s' from service %s", - name, service) + raise RuntimeError(_("Unable to get parameter '%(param_name)s' from " + "service %(service)s"), + {'param_name': name, 'service': service}) def generate_cfg_from_general(cfg, configs, general_config, @@ -215,7 +218,7 @@ def generate_cfg_from_general(cfg, configs, general_config, for name, value in configs['general'].items(): if value: cfg = _set_config(cfg, general_config, name) - LOG.info("Applying config: %s" % name) + LOG.info(_LI("Applying config: %s"), name) else: cfg = _set_config(cfg, general_config) return cfg diff --git a/sahara/plugins/spark/plugin.py b/sahara/plugins/spark/plugin.py index 13cbc623b5..06f0464038 100644 --- a/sahara/plugins/spark/plugin.py +++ b/sahara/plugins/spark/plugin.py @@ -19,6 +19,8 @@ from oslo.config import cfg from sahara import conductor from sahara import context +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils @@ -48,9 +50,8 @@ class SparkProvider(p.ProvisioningPluginBase): return "Apache Spark" def get_description(self): - return ( - "This plugin provides an ability to launch Spark on Hadoop " - "CDH cluster without any management consoles.") + return _("This plugin provides an ability to launch Spark on Hadoop " + "CDH cluster without any management consoles.") def get_versions(self): return ['1.0.0', '0.9.1'] @@ -70,7 +71,7 @@ class SparkProvider(p.ProvisioningPluginBase): dn_count = sum([ng.count for ng in utils.get_node_groups(cluster, "datanode")]) if dn_count < 1: - raise ex.InvalidComponentCountException("datanode", "1 or more", + raise ex.InvalidComponentCountException("datanode", _("1 or more"), nn_count) # validate Spark Master Node and Spark Slaves @@ -84,7 +85,8 @@ class SparkProvider(p.ProvisioningPluginBase): in utils.get_node_groups(cluster, "slave")]) if sl_count < 1: - raise ex.InvalidComponentCountException("Spark slave", "1 or more", + raise ex.InvalidComponentCountException("Spark slave", + _("1 or more"), sl_count) def update_infra(self, cluster): @@ -106,7 +108,7 @@ class SparkProvider(p.ProvisioningPluginBase): # start the data nodes self._start_slave_datanode_processes(dn_instances) - LOG.info("Hadoop services in cluster %s have been started" % + LOG.info(_LI("Hadoop services in cluster %s have been started"), cluster.name) with remote.get_remote(nn_instance) as r: @@ -118,10 +120,11 @@ class SparkProvider(p.ProvisioningPluginBase): if sm_instance: with remote.get_remote(sm_instance) as r: run.start_spark_master(r, self._spark_home(cluster)) - LOG.info("Spark service at '%s' has been started", + LOG.info(_LI("Spark service at '%s' has been started"), sm_instance.hostname()) - LOG.info('Cluster %s has been started successfully' % cluster.name) + LOG.info(_LI('Cluster %s has been started 
successfully'), + cluster.name) self._set_cluster_info(cluster) def _spark_home(self, cluster): @@ -373,7 +376,7 @@ class SparkProvider(p.ProvisioningPluginBase): self._start_slave_datanode_processes(instances) run.start_spark_master(r_master, self._spark_home(cluster)) - LOG.info("Spark master service at '%s' has been restarted", + LOG.info(_LI("Spark master service at '%s' has been restarted"), master.hostname()) def _get_scalable_processes(self): @@ -386,9 +389,9 @@ class SparkProvider(p.ProvisioningPluginBase): ng = ug.get_by_id(cluster.node_groups, ng_id) if not set(ng.node_processes).issubset(scalable_processes): raise ex.NodeGroupCannotBeScaled( - ng.name, "Spark plugin cannot scale nodegroup" - " with processes: " + - ' '.join(ng.node_processes)) + ng.name, _("Spark plugin cannot scale nodegroup" + " with processes: %s") % + ' '.join(ng.node_processes)) def _validate_existing_ng_scaling(self, cluster, existing): scalable_processes = self._get_scalable_processes() @@ -400,9 +403,9 @@ class SparkProvider(p.ProvisioningPluginBase): dn_to_delete += ng.count - existing[ng.id] if not set(ng.node_processes).issubset(scalable_processes): raise ex.NodeGroupCannotBeScaled( - ng.name, "Spark plugin cannot scale nodegroup" - " with processes: " + - ' '.join(ng.node_processes)) + ng.name, _("Spark plugin cannot scale nodegroup" + " with processes: %s") % + ' '.join(ng.node_processes)) dn_amount = len(utils.get_instances(cluster, "datanode")) rep_factor = c_helper.get_config_value('HDFS', "dfs.replication", @@ -410,10 +413,10 @@ class SparkProvider(p.ProvisioningPluginBase): if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor: raise ex.ClusterCannotBeScaled( - cluster.name, "Spark plugin cannot shrink cluster because " - "there would be not enough nodes for HDFS " - "replicas (replication factor is %s)" % - rep_factor) + cluster.name, _("Spark plugin cannot shrink cluster because " + "there would be not enough nodes for HDFS " + "replicas (replication factor is %s)") % + rep_factor) def get_edp_engine(self, cluster, job_type, default_engines): '''Select an EDP engine for Spark standalone deployment diff --git a/sahara/plugins/spark/scaling.py b/sahara/plugins/spark/scaling.py index 9d3e7fd841..1fbad1bde5 100644 --- a/sahara/plugins/spark/scaling.py +++ b/sahara/plugins/spark/scaling.py @@ -19,6 +19,7 @@ from oslo.utils import timeutils import six from sahara import context +from sahara.i18n import _ from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils from sahara.plugins.spark import config_helper as c_helper @@ -88,8 +89,10 @@ def decommission_dn(nn, inst_to_be_deleted, survived_inst): if not all_found: ex.DecommissionError( - "Cannot finish decommission of cluster %s in %d seconds" % - (nn.node_group.cluster, timeout)) + _("Cannot finish decommission of cluster %(cluster)s in " + "%(seconds)d seconds") % + {"cluster": nn.node_group.cluster, + "seconds": timeout}) def parse_dfs_report(cmd_output): diff --git a/sahara/plugins/vanilla/hadoop2/config.py b/sahara/plugins/vanilla/hadoop2/config.py index 364167d70c..f826f1056a 100644 --- a/sahara/plugins/vanilla/hadoop2/config.py +++ b/sahara/plugins/vanilla/hadoop2/config.py @@ -15,6 +15,7 @@ import six +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper from sahara.plugins.vanilla.hadoop2 import oozie_helper as o_helper @@ -296,8 +297,8 @@ def _merge_configs(a, b): def configure_topology_data(pctx, 
cluster): if c_helper.is_data_locality_enabled(pctx, cluster): - LOG.info("Node group awareness is not implemented in YARN yet " - "so enable_hypervisor_awareness set to False explicitly") + LOG.info(_LI("Node group awareness is not implemented in YARN yet " + "so enable_hypervisor_awareness set to False explicitly")) tpl_map = th.generate_topology_map(cluster, is_node_awareness=False) topology_data = "\n".join( [k + " " + v for k, v in tpl_map.items()]) + "\n" diff --git a/sahara/plugins/vanilla/hadoop2/config_helper.py b/sahara/plugins/vanilla/hadoop2/config_helper.py index febc9f4add..4e2bdda84e 100644 --- a/sahara/plugins/vanilla/hadoop2/config_helper.py +++ b/sahara/plugins/vanilla/hadoop2/config_helper.py @@ -16,6 +16,7 @@ from oslo.config import cfg from sahara import exceptions as ex +from sahara.i18n import _ from sahara.plugins import provisioning as p from sahara.utils import types @@ -157,7 +158,8 @@ def get_config_value(pctx, service, name, cluster=None): return c.default_value raise ex.NotFoundException( - name, "Unable to get parameter '%s' from service %s" % (name, service)) + name, _("Unable to get parameter '%(name)s' from service %(service)s") + % {"name": name, "service": service}) def is_swift_enabled(pctx, cluster): diff --git a/sahara/plugins/vanilla/hadoop2/run_scripts.py b/sahara/plugins/vanilla/hadoop2/run_scripts.py index 04a1e1284e..fa5a6867be 100644 --- a/sahara/plugins/vanilla/hadoop2/run_scripts.py +++ b/sahara/plugins/vanilla/hadoop2/run_scripts.py @@ -14,6 +14,8 @@ # limitations under the License. from sahara import context +from sahara.i18n import _ +from sahara.i18n import _LI from sahara.openstack.common import log as logging from sahara.plugins.general import exceptions as ex from sahara.plugins.vanilla.hadoop2 import config_helper as c_helper @@ -50,7 +52,7 @@ def _start_processes(instance, processes): 'sudo su - -c "yarn-daemon.sh start %s" hadoop' % process) else: raise ex.HadoopProvisionError( - "Process %s is not supported" % process) + _("Process %s is not supported") % process) def start_hadoop_process(instance, process): @@ -142,12 +144,12 @@ def await_datanodes(cluster): if datanodes_count < 1: return - LOG.info("Waiting %s datanodes to start up" % datanodes_count) + LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count) with vu.get_namenode(cluster).remote() as r: while True: if _check_datanodes_count(r, datanodes_count): LOG.info( - 'Datanodes on cluster %s has been started' % + _LI('Datanodes on cluster %s has been started'), cluster.name) return @@ -155,8 +157,8 @@ def await_datanodes(cluster): if not g.check_cluster_exists(cluster): LOG.info( - 'Stop waiting datanodes on cluster %s since it has ' - 'been deleted' % cluster.name) + _LI('Stop waiting datanodes on cluster %s since it has ' + 'been deleted'), cluster.name) return diff --git a/sahara/plugins/vanilla/hadoop2/scaling.py b/sahara/plugins/vanilla/hadoop2/scaling.py index 1f75aaa7e9..8ed303a15a 100644 --- a/sahara/plugins/vanilla/hadoop2/scaling.py +++ b/sahara/plugins/vanilla/hadoop2/scaling.py @@ -16,6 +16,7 @@ from oslo.utils import timeutils from sahara import context +from sahara.i18n import _ from sahara.plugins.general import exceptions as ex from sahara.plugins.general import utils as u from sahara.plugins.vanilla.hadoop2 import config @@ -122,8 +123,9 @@ def _check_decommission(cluster, instances, check_func, timeout): context.sleep(5) else: ex.DecommissionError( - "Cannot finish decommission of cluster %s in %d seconds" % - (cluster, timeout)) + _("Cannot 
diff --git a/sahara/plugins/vanilla/hadoop2/scaling.py b/sahara/plugins/vanilla/hadoop2/scaling.py
index 1f75aaa7e9..8ed303a15a 100644
--- a/sahara/plugins/vanilla/hadoop2/scaling.py
+++ b/sahara/plugins/vanilla/hadoop2/scaling.py
@@ -16,6 +16,7 @@ from oslo.utils import timeutils
 
 from sahara import context
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins.vanilla.hadoop2 import config
@@ -122,8 +123,9 @@ def _check_decommission(cluster, instances, check_func, timeout):
             context.sleep(5)
         else:
             ex.DecommissionError(
-                "Cannot finish decommission of cluster %s in %d seconds" %
-                (cluster, timeout))
+                _("Cannot finish decommission of cluster %(cluster)s in "
+                  "%(seconds)d seconds") %
+                {"cluster": cluster, "seconds": timeout})
 
 
 def _check_nodemanagers_decommission(cluster, instances):
diff --git a/sahara/plugins/vanilla/hadoop2/validation.py b/sahara/plugins/vanilla/hadoop2/validation.py
index f72cc0e34b..6af0d13fa9 100644
--- a/sahara/plugins/vanilla/hadoop2/validation.py
+++ b/sahara/plugins/vanilla/hadoop2/validation.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins.vanilla.hadoop2 import config_helper as cu
@@ -67,8 +68,9 @@ def validate_cluster_creating(pctx, cluster):
     rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
     if dn_count < rep_factor:
         raise ex.InvalidComponentCountException(
-            'datanode', rep_factor, dn_count, 'Number of datanodes must be not'
-            ' less than dfs.replication.')
+            'datanode', rep_factor, dn_count, _('Number of datanodes must be '
+                                                'not less than '
+                                                'dfs.replication.'))
 
 
 def validate_additional_ng_scaling(cluster, additional):
@@ -78,13 +80,13 @@ def validate_additional_ng_scaling(cluster, additional):
     for ng_id in additional:
         ng = gu.get_by_id(cluster.node_groups, ng_id)
         if not set(ng.node_processes).issubset(scalable_processes):
-            msg = "Vanilla plugin cannot scale nodegroup with processes: %s"
+            msg = _("Vanilla plugin cannot scale nodegroup with processes: %s")
             raise ex.NodeGroupCannotBeScaled(ng.name,
                                              msg % ' '.join(ng.node_processes))
 
         if not rm and 'nodemanager' in ng.node_processes:
-            msg = ("Vanilla plugin cannot scale node group with processes "
-                   "which have no master-processes run in cluster")
+            msg = _("Vanilla plugin cannot scale node group with processes "
+                    "which have no master-processes run in cluster")
             raise ex.NodeGroupCannotBeScaled(ng.name, msg)
 
 
@@ -97,8 +99,8 @@ def validate_existing_ng_scaling(pctx, cluster, existing):
             dn_to_delete += ng.count - existing[ng.id]
 
         if not set(ng.node_processes).issubset(scalable_processes):
-            msg = ("Vanilla plugin cannot scale nodegroup "
-                   "with processes: %s")
+            msg = _("Vanilla plugin cannot scale nodegroup "
+                    "with processes: %s")
             raise ex.NodeGroupCannotBeScaled(
                 ng.name, msg % ' '.join(ng.node_processes))
 
@@ -106,8 +108,8 @@ def validate_existing_ng_scaling(pctx, cluster, existing):
     rep_factor = cu.get_config_value(pctx, 'HDFS', 'dfs.replication', cluster)
 
     if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
-        msg = ("Vanilla plugin cannot shrink cluster because it would be not "
-               "enough nodes for replicas (replication factor is %s)")
+        msg = _("Vanilla plugin cannot shrink cluster because it would be "
+                "not enough nodes for replicas (replication factor is %s)")
         raise ex.ClusterCannotBeScaled(
             cluster.name, msg % rep_factor)
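
Where more than one value is interpolated, the hunks above switch from positional %s/%d to named %(key)s placeholders so a translation can reorder the values without breaking formatting. A stand-alone sketch with a stub _ and made-up values:

def _(msg):
    # Stand-in translation marker, not sahara.i18n.
    return msg


def decommission_timeout_message(cluster, timeout):
    # Named keys let a translated string emit the values in any order.
    return _("Cannot finish decommission of cluster %(cluster)s in "
             "%(seconds)d seconds") % {"cluster": cluster, "seconds": timeout}


if __name__ == "__main__":
    print(decommission_timeout_message("demo-cluster", 300))
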
diff --git a/sahara/plugins/vanilla/plugin.py b/sahara/plugins/vanilla/plugin.py
index 5a18614b15..3d7309e9bb 100644
--- a/sahara/plugins/vanilla/plugin.py
+++ b/sahara/plugins/vanilla/plugin.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils as u
 from sahara.plugins import provisioning as p
@@ -25,9 +26,9 @@ class VanillaProvider(p.ProvisioningPluginBase):
 
     def get_description(self):
         return (
-            "This plugin provides an ability to launch vanilla Apache Hadoop "
-            "cluster without any management consoles. Also it can "
-            "deploy Oozie and Hive")
+            _("This plugin provides an ability to launch vanilla Apache Hadoop"
+              " cluster without any management consoles. Also it can "
+              "deploy Oozie and Hive"))
 
     def _get_version_handler(self, hadoop_version):
         return self.version_factory.get_version_handler(hadoop_version)
diff --git a/sahara/plugins/vanilla/v1_2_1/config_helper.py b/sahara/plugins/vanilla/v1_2_1/config_helper.py
index 5aafb2bd1b..7b5c376ac4 100644
--- a/sahara/plugins/vanilla/v1_2_1/config_helper.py
+++ b/sahara/plugins/vanilla/v1_2_1/config_helper.py
@@ -18,6 +18,9 @@ from oslo.config import cfg
 from sahara import conductor as c
 from sahara import context
 from sahara import exceptions as ex
+from sahara.i18n import _
+from sahara.i18n import _LI
+from sahara.i18n import _LW
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import utils
 from sahara.plugins import provisioning as p
@@ -207,8 +210,9 @@ def get_config_value(service, name, cluster=None):
         if configs.applicable_target == service and configs.name == name:
             return configs.default_value
 
-    raise ex.ConfigurationError("Unable get parameter '%s' from service %s" %
-                                (name, service))
+    raise ex.ConfigurationError(_("Unable get parameter '%(parameter)s' from "
+                                  "service %(service)s")
+                                % {"parameter": name, "service": service})
 
 
 def generate_cfg_from_general(cfg, configs, general_config,
@@ -220,7 +224,7 @@ def generate_cfg_from_general(cfg, configs, general_config,
         for name, value in configs['general'].items():
             if value:
                 cfg = _set_config(cfg, general_config, name)
-                LOG.info("Applying config: %s" % name)
+                LOG.info(_LI("Applying config: %s"), name)
     else:
         cfg = _set_config(cfg, general_config)
     return cfg
@@ -356,8 +360,8 @@ def extract_environment_confs(configs):
                 if param_name == cfg_name and param_value is not None:
                     lst.append(cfg_format_str % param_value)
         else:
-            LOG.warn("Plugin received wrong applicable target '%s' in "
-                     "environmental configs" % service)
+            LOG.warn(_LW("Plugin received wrong applicable target '%s' in "
+                         "environmental configs"), service)
     return lst
 
@@ -377,8 +381,8 @@ def extract_xml_confs(configs):
                 if param_name in names and param_value is not None:
                     lst.append((param_name, param_value))
         else:
-            LOG.warn("Plugin received wrong applicable target '%s' for "
-                     "xml configs" % service)
+            LOG.warn(_LW("Plugin received wrong applicable target '%s' for "
+                         "xml configs"), service)
     return lst
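
The helper above pulls in three distinct markers: _ for user-facing strings (exceptions, descriptions) and _LI/_LW for info- and warning-level log messages, which keeps each category in its own message domain. The sahara/i18n.py module itself is not part of this diff; a plausible sketch of what such a module looks like, based on oslo.i18n's documented TranslatorFactory (package name oslo_i18n in current releases, exposed as oslo.i18n in older ones), is:

import oslo_i18n

# Assumed domain name; the real module may differ.
_translators = oslo_i18n.TranslatorFactory(domain='sahara')

# Primary marker for user-facing strings (exceptions, API text).
_ = _translators.primary

# Level-specific markers for log messages.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
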
diff --git a/sahara/plugins/vanilla/v1_2_1/scaling.py b/sahara/plugins/vanilla/v1_2_1/scaling.py
index 5ff3c13dbd..5a21137644 100644
--- a/sahara/plugins/vanilla/v1_2_1/scaling.py
+++ b/sahara/plugins/vanilla/v1_2_1/scaling.py
@@ -19,6 +19,7 @@ from oslo.utils import timeutils
 import six
 
 from sahara import context
+from sahara.i18n import _
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
 from sahara.plugins.vanilla.v1_2_1 import config_helper
@@ -75,8 +76,9 @@ def decommission_dn(nn, inst_to_be_deleted, survived_inst):
 
     if not all_found:
         ex.DecommissionError(
-            "Cannot finish decommission of cluster %s in %d seconds" %
-            (nn.node_group.cluster, timeout))
+            _("Cannot finish decommission of cluster %(cluster)s in "
+              "%(seconds)d seconds") %
+            {"cluster": nn.node_group.cluster, "seconds": timeout})
 
 
 def parse_dfs_report(cmd_output):
diff --git a/sahara/plugins/vanilla/v1_2_1/versionhandler.py b/sahara/plugins/vanilla/v1_2_1/versionhandler.py
index d5f8741392..9d3eb3a5d3 100644
--- a/sahara/plugins/vanilla/v1_2_1/versionhandler.py
+++ b/sahara/plugins/vanilla/v1_2_1/versionhandler.py
@@ -20,6 +20,8 @@ import six
 
 from sahara import conductor
 from sahara import context
+from sahara.i18n import _
+from sahara.i18n import _LI
 from sahara.openstack.common import log as logging
 from sahara.plugins.general import exceptions as ex
 from sahara.plugins.general import utils
@@ -68,14 +70,14 @@ class VersionHandler(avm.AbstractVersionHandler):
                         in utils.get_node_groups(cluster, "jobtracker")])
         if jt_count not in [0, 1]:
-            raise ex.InvalidComponentCountException("jobtracker", '0 or 1',
+            raise ex.InvalidComponentCountException("jobtracker", _('0 or 1'),
                                                     jt_count)
 
         oozie_count = sum([ng.count for ng
                            in utils.get_node_groups(cluster, "oozie")])
         if oozie_count not in [0, 1]:
-            raise ex.InvalidComponentCountException("oozie", '0 or 1',
+            raise ex.InvalidComponentCountException("oozie", _('0 or 1'),
                                                     oozie_count)
 
         hive_count = sum([ng.count for ng
@@ -97,7 +99,7 @@ class VersionHandler(avm.AbstractVersionHandler):
                     "jobtracker", required_by="hive")
 
         if hive_count not in [0, 1]:
-            raise ex.InvalidComponentCountException("hive", '0 or 1',
+            raise ex.InvalidComponentCountException("hive", _('0 or 1'),
                                                     hive_count)
 
     def configure_cluster(self, cluster):
@@ -122,7 +124,7 @@ class VersionHandler(avm.AbstractVersionHandler):
 
         self._await_datanodes(cluster)
 
-        LOG.info("Hadoop services in cluster %s have been started" %
+        LOG.info(_LI("Hadoop services in cluster %s have been started"),
                  cluster.name)
 
         oozie = vu.get_oozie(cluster)
@@ -133,7 +135,7 @@ class VersionHandler(avm.AbstractVersionHandler):
                 run.oozie_create_db(r)
                 run.oozie_share_lib(r, nn_instance.hostname())
                 run.start_oozie(r)
-                LOG.info("Oozie service at '%s' has been started",
+                LOG.info(_LI("Oozie service at '%s' has been started"),
                          nn_instance.hostname())
 
         hive_server = vu.get_hiveserver(cluster)
@@ -148,10 +150,11 @@ class VersionHandler(avm.AbstractVersionHandler):
                     run.mysql_start(r, hive_server)
                     run.hive_create_db(r)
                     run.hive_metastore_start(r)
-                    LOG.info("Hive Metastore server at %s has been started",
+                    LOG.info(_LI("Hive Metastore server at %s has been "
+                                 "started"),
                              hive_server.hostname())
 
-        LOG.info('Cluster %s has been started successfully' % cluster.name)
+        LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
         self._set_cluster_info(cluster)
 
     def _await_datanodes(self, cluster):
@@ -159,12 +162,12 @@ class VersionHandler(avm.AbstractVersionHandler):
         if datanodes_count < 1:
             return
 
-        LOG.info("Waiting %s datanodes to start up" % datanodes_count)
+        LOG.info(_LI("Waiting %s datanodes to start up"), datanodes_count)
        with remote.get_remote(vu.get_namenode(cluster)) as r:
             while True:
                 if run.check_datanodes_count(r, datanodes_count):
                     LOG.info(
-                        'Datanodes on cluster %s has been started' %
+                        _LI('Datanodes on cluster %s has been started'),
                         cluster.name)
                     return
 
@@ -172,8 +175,8 @@ class VersionHandler(avm.AbstractVersionHandler):
 
                 if not g.check_cluster_exists(cluster):
                     LOG.info(
-                        'Stop waiting datanodes on cluster %s since it has '
-                        'been deleted' % cluster.name)
+                        _LI('Stop waiting datanodes on cluster %s since it has'
+                            ' been deleted'), cluster.name)
                     return
 
     def _extract_configs_to_extra(self, cluster):
@@ -451,14 +454,14 @@ class VersionHandler(avm.AbstractVersionHandler):
             ng = self._get_by_id(cluster.node_groups, ng_id)
             if not set(ng.node_processes).issubset(scalable_processes):
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Vanilla plugin cannot scale nodegroup"
-                    " with processes: " +
-                    ' '.join(ng.node_processes))
+                    ng.name, _("Vanilla plugin cannot scale nodegroup"
+                               " with processes: %s") %
+                    ' '.join(ng.node_processes))
             if not jt and 'tasktracker' in ng.node_processes:
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Vanilla plugin cannot scale node group with "
-                    "processes which have no master-processes run "
-                    "in cluster")
+                    ng.name, _("Vanilla plugin cannot scale node group with "
+                               "processes which have no master-processes run "
+                               "in cluster"))
 
     def _validate_existing_ng_scaling(self, cluster, existing):
         scalable_processes = self._get_scalable_processes()
@@ -470,9 +473,9 @@ class VersionHandler(avm.AbstractVersionHandler):
                 dn_to_delete += ng.count - existing[ng.id]
             if not set(ng.node_processes).issubset(scalable_processes):
                 raise ex.NodeGroupCannotBeScaled(
-                    ng.name, "Vanilla plugin cannot scale nodegroup"
-                    " with processes: " +
-                    ' '.join(ng.node_processes))
+                    ng.name, _("Vanilla plugin cannot scale nodegroup"
+                               " with processes: %s") %
+                    ' '.join(ng.node_processes))
 
         dn_amount = len(vu.get_datanodes(cluster))
         rep_factor = c_helper.get_config_value('HDFS', 'dfs.replication',
@@ -480,6 +483,6 @@ class VersionHandler(avm.AbstractVersionHandler):
 
         if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
             raise ex.ClusterCannotBeScaled(
-                cluster.name, "Vanilla plugin cannot shrink cluster because "
-                "it would be not enough nodes for replicas "
-                "(replication factor is %s)" % rep_factor)
+                cluster.name, _("Vanilla plugin cannot shrink cluster because "
+                                "it would be not enough nodes for replicas "
+                                "(replication factor is %s)") % rep_factor)
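
Several of the wrapped log calls above split the literal across lines to stay under the line-length limit. Adjacent string literals are concatenated by the parser before the marker sees them, so the extracted catalog still contains a single message. A stand-alone sketch of that detail (stub _LI, stdlib logging):

import logging

LOG = logging.getLogger(__name__)


def _LI(msg):
    # Stand-in marker; the real one comes from sahara.i18n.
    return msg


def report_metastore(hostname):
    # The two literals below are one string by the time _LI receives them.
    LOG.info(_LI("Hive Metastore server at %s has been "
                 "started"),
             hostname)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    report_metastore("hive-host")
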
diff --git a/tox.ini b/tox.ini
index 570b6fb0d3..ca84eed3b3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -61,5 +61,5 @@ builtins = _
 exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools
 
 [hacking]
-import_exceptions = sahara.i18n._, sahara.i18n._LI, sahara.i18n._LW, sahara.i18n._LE
+import_exceptions = sahara.i18n._, sahara.i18n._LI, sahara.i18n._LW, sahara.i18n._LE, sahara.i18n._LC
 local-check-factory = sahara.utils.hacking.checks.factory
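
The tox.ini change above whitelists sahara.i18n._LC alongside the other markers, so hacking's import checks accept importing these names directly from the i18n module; _LC is the marker reserved for critical-level log messages. A stub sketch of its intended use (stand-in marker, hypothetical message text):

import logging

LOG = logging.getLogger(__name__)


def _LC(msg):
    # Stand-in for sahara.i18n._LC; the real marker translates via oslo.i18n.
    return msg


if __name__ == "__main__":
    logging.basicConfig(level=logging.CRITICAL)
    LOG.critical(_LC("Cluster %s entered an unrecoverable state"),
                 "demo-cluster")
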